Index: head/sys/arm64/arm64/gic.c =================================================================== --- head/sys/arm64/arm64/gic.c (revision 295511) +++ head/sys/arm64/arm64/gic.c (revision 295512) @@ -1,476 +1,453 @@ /*- * Copyright (c) 2011 The FreeBSD Foundation * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Developed by Damjan Marion * * Based on OMAP4 GIC code by Ben Gray * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pic_if.h" /* We are using GICv2 register naming */ /* Distributor Registers */ #define GICD_CTLR 0x000 /* v1 ICDDCR */ #define GICD_TYPER 0x004 /* v1 ICDICTR */ #define GICD_IIDR 0x008 /* v1 ICDIIDR */ #define GICD_IGROUPR(n) (0x0080 + ((n) * 4)) /* v1 ICDISER */ #define GICD_ISENABLER(n) (0x0100 + ((n) * 4)) /* v1 ICDISER */ #define GICD_ICENABLER(n) (0x0180 + ((n) * 4)) /* v1 ICDICER */ #define GICD_ISPENDR(n) (0x0200 + ((n) * 4)) /* v1 ICDISPR */ #define GICD_ICPENDR(n) (0x0280 + ((n) * 4)) /* v1 ICDICPR */ #define GICD_ICACTIVER(n) (0x0380 + ((n) * 4)) /* v1 ICDABR */ #define GICD_IPRIORITYR(n) (0x0400 + ((n) * 4)) /* v1 ICDIPR */ #define GICD_ITARGETSR(n) (0x0800 + ((n) * 4)) /* v1 ICDIPTR */ #define GICD_ICFGR(n) (0x0C00 + ((n) * 4)) /* v1 ICDICFR */ #define GICD_SGIR(n) (0x0F00 + ((n) * 4)) /* v1 ICDSGIR */ /* CPU Registers */ #define GICC_CTLR 0x0000 /* v1 ICCICR */ #define GICC_PMR 0x0004 /* v1 ICCPMR */ #define GICC_BPR 0x0008 /* v1 ICCBPR */ #define GICC_IAR 0x000C /* v1 ICCIAR */ #define GICC_EOIR 0x0010 /* v1 ICCEOIR */ #define GICC_RPR 0x0014 /* v1 ICCRPR */ #define GICC_HPPIR 0x0018 /* v1 ICCHPIR */ #define GICC_ABPR 0x001C /* v1 ICCABPR */ #define GICC_IIDR 0x00FC /* v1 ICCIIDR*/ #define GIC_FIRST_IPI 0 /* Irqs 0-15 are SGIs/IPIs. */ #define GIC_LAST_IPI 15 #define GIC_FIRST_PPI 16 /* Irqs 16-31 are private (per */ #define GIC_LAST_PPI 31 /* core) peripheral interrupts. */ #define GIC_FIRST_SPI 32 /* Irqs 32+ are shared peripherals. 
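 *
 * Of these, only the shared peripheral interrupts are normally
 * reprogrammable: SGIs are always edge-triggered and PPI configuration is
 * implementation defined.  Each interrupt gets two GICD_ICFGR bits
 * (defined just below), sixteen interrupts per 32-bit register, so a
 * hedged sketch of switching one SPI to rising-edge trigger, mirroring
 * what the GICv2m code later in this file does (sc, irq and reg are
 * illustrative locals), would be:
 *
 *	reg = gic_d_read_4(sc, GICD_ICFGR(irq >> 4));
 *	reg |= (GICD_ICFGR_TRIG_EDGE | GICD_ICFGR_POL_HIGH) << ((irq & 0xf) * 2);
 *	gic_d_write_4(sc, GICD_ICFGR(irq >> 4), reg);
 *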
*/ /* First bit is a polarity bit (0 - low, 1 - high) */ #define GICD_ICFGR_POL_LOW (0 << 0) #define GICD_ICFGR_POL_HIGH (1 << 0) #define GICD_ICFGR_POL_MASK 0x1 /* Second bit is a trigger bit (0 - level, 1 - edge) */ #define GICD_ICFGR_TRIG_LVL (0 << 1) #define GICD_ICFGR_TRIG_EDGE (1 << 1) #define GICD_ICFGR_TRIG_MASK 0x2 static struct resource_spec arm_gic_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Distributor registers */ { SYS_RES_MEMORY, 1, RF_ACTIVE }, /* CPU Interrupt Intf. registers */ { -1, 0 } }; static struct arm_gic_softc *arm_gic_sc = NULL; #define gic_c_read_4(_sc, _reg) \ bus_space_read_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg)) #define gic_c_write_4(_sc, _reg, _val) \ bus_space_write_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg), (_val)) #define gic_d_read_4(_sc, _reg) \ bus_space_read_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg)) #define gic_d_write_4(_sc, _reg, _val) \ bus_space_write_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val)) static pic_dispatch_t gic_dispatch; static pic_eoi_t gic_eoi; static pic_mask_t gic_mask_irq; static pic_unmask_t gic_unmask_irq; #ifdef SMP static void gic_init_secondary(device_t dev) { struct arm_gic_softc *sc = device_get_softc(dev); int i; for (i = 0; i < sc->nirqs; i += 4) gic_d_write_4(sc, GICD_IPRIORITYR(i >> 2), 0); /* Set all the interrupts to be in Group 0 (secure) */ for (i = 0; i < sc->nirqs; i += 32) { gic_d_write_4(sc, GICD_IGROUPR(i >> 5), 0); } /* Enable CPU interface */ gic_c_write_4(sc, GICC_CTLR, 1); /* Set priority mask register. */ gic_c_write_4(sc, GICC_PMR, 0xff); /* Enable interrupt distribution */ gic_d_write_4(sc, GICD_CTLR, 0x01); /* * Activate the timer interrupts: virtual, secure, and non-secure. */ gic_d_write_4(sc, GICD_ISENABLER(27 >> 5), (1UL << (27 & 0x1F))); gic_d_write_4(sc, GICD_ISENABLER(29 >> 5), (1UL << (29 & 0x1F))); gic_d_write_4(sc, GICD_ISENABLER(30 >> 5), (1UL << (30 & 0x1F))); } #endif int arm_gic_attach(device_t dev) { struct arm_gic_softc *sc; int i; uint32_t icciidr; if (arm_gic_sc) return (ENXIO); sc = device_get_softc(dev); if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } sc->gic_dev = dev; arm_gic_sc = sc; /* Initialize mutex */ mtx_init(&sc->mutex, "GIC lock", "", MTX_SPIN); /* Distributor Interface */ sc->gic_d_bst = rman_get_bustag(sc->gic_res[0]); sc->gic_d_bsh = rman_get_bushandle(sc->gic_res[0]); /* CPU Interface */ sc->gic_c_bst = rman_get_bustag(sc->gic_res[1]); sc->gic_c_bsh = rman_get_bushandle(sc->gic_res[1]); /* Disable interrupt forwarding to the CPU interface */ gic_d_write_4(sc, GICD_CTLR, 0x00); /* Get the number of interrupts */ sc->nirqs = gic_d_read_4(sc, GICD_TYPER); sc->nirqs = 32 * ((sc->nirqs & 0x1f) + 1); arm_register_root_pic(dev, sc->nirqs); icciidr = gic_c_read_4(sc, GICC_IIDR); device_printf(dev,"pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n", icciidr>>20, (icciidr>>16) & 0xF, (icciidr>>12) & 0xf, (icciidr & 0xfff), sc->nirqs); /* Set all global interrupts to be level triggered, active low. */ for (i = 32; i < sc->nirqs; i += 16) { gic_d_write_4(sc, GICD_ICFGR(i >> 4), 0x00000000); } /* Disable all interrupts. 
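 *
 * The GICD enable, priority and config banks pack 32, 4 and 16 interrupts
 * per 32-bit register respectively (1, 8 and 2 bits per interrupt), which
 * is why the loops here step by 32, 4 and 16 and shift the interrupt
 * number by 5, 2 and 4.  A minimal sketch of masking one interrupt n with
 * the same macros, as gic_mask_irq() below does (n is illustrative):
 *
 *	gic_d_write_4(sc, GICD_ICENABLER(n >> 5), 1UL << (n & 0x1F));
 *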
*/ for (i = 32; i < sc->nirqs; i += 32) { gic_d_write_4(sc, GICD_ICENABLER(i >> 5), 0xFFFFFFFF); } for (i = 0; i < sc->nirqs; i += 4) { gic_d_write_4(sc, GICD_IPRIORITYR(i >> 2), 0); gic_d_write_4(sc, GICD_ITARGETSR(i >> 2), 1 << 0 | 1 << 8 | 1 << 16 | 1 << 24); } /* Set all the interrupts to be in Group 0 (secure) */ for (i = 0; i < sc->nirqs; i += 32) { gic_d_write_4(sc, GICD_IGROUPR(i >> 5), 0); } /* Enable CPU interface */ gic_c_write_4(sc, GICC_CTLR, 1); /* Set priority mask register. */ gic_c_write_4(sc, GICC_PMR, 0xff); /* Enable interrupt distribution */ gic_d_write_4(sc, GICD_CTLR, 0x01); return (0); } static void gic_dispatch(device_t dev, struct trapframe *frame) { struct arm_gic_softc *sc = device_get_softc(dev); uint32_t active_irq; int first = 1; while (1) { active_irq = gic_c_read_4(sc, GICC_IAR); /* * Immediatly EOIR the SGIs, because doing so requires the other * bits (ie CPU number), not just the IRQ number, and we do not * have this information later. */ if ((active_irq & 0x3ff) <= GIC_LAST_IPI) gic_c_write_4(sc, GICC_EOIR, active_irq); active_irq &= 0x3FF; if (active_irq == 0x3FF) { if (first) printf("Spurious interrupt detected\n"); return; } arm_dispatch_intr(active_irq, frame); first = 0; } } static void gic_eoi(device_t dev, u_int irq) { struct arm_gic_softc *sc = device_get_softc(dev); gic_c_write_4(sc, GICC_EOIR, irq); } void gic_mask_irq(device_t dev, u_int irq) { struct arm_gic_softc *sc = device_get_softc(dev); gic_d_write_4(sc, GICD_ICENABLER(irq >> 5), (1UL << (irq & 0x1F))); gic_c_write_4(sc, GICC_EOIR, irq); } void gic_unmask_irq(device_t dev, u_int irq) { struct arm_gic_softc *sc = device_get_softc(dev); gic_d_write_4(sc, GICD_ISENABLER(irq >> 5), (1UL << (irq & 0x1F))); } #ifdef SMP static void gic_ipi_send(device_t dev, cpuset_t cpus, u_int ipi) { struct arm_gic_softc *sc = device_get_softc(dev); uint32_t val = 0, i; for (i = 0; i < MAXCPU; i++) if (CPU_ISSET(i, &cpus)) val |= 1 << (16 + i); gic_d_write_4(sc, GICD_SGIR(0), val | ipi); } - -static int -arm_gic_ipi_read(device_t dev, int i) -{ - - if (i != -1) { - /* - * The intr code will automagically give the frame pointer - * if the interrupt argument is 0. 
- */ - if ((unsigned int)i > 16) - return (0); - return (i); - } - - return (0x3ff); -} - -static void -arm_gic_ipi_clear(device_t dev, int ipi) -{ - /* no-op */ -} #endif static device_method_t arm_gic_methods[] = { /* Device interface */ DEVMETHOD(device_attach, arm_gic_attach), /* pic_if */ DEVMETHOD(pic_dispatch, gic_dispatch), DEVMETHOD(pic_eoi, gic_eoi), DEVMETHOD(pic_mask, gic_mask_irq), DEVMETHOD(pic_unmask, gic_unmask_irq), #ifdef SMP DEVMETHOD(pic_init_secondary, gic_init_secondary), DEVMETHOD(pic_ipi_send, gic_ipi_send), #endif { 0, 0 } }; DEFINE_CLASS_0(gic, arm_gic_driver, arm_gic_methods, sizeof(struct arm_gic_softc)); #define GICV2M_MSI_TYPER 0x008 #define MSI_TYPER_SPI_BASE(x) (((x) >> 16) & 0x3ff) #define MSI_TYPER_SPI_COUNT(x) (((x) >> 0) & 0x3ff) #define GICv2M_MSI_SETSPI_NS 0x040 #define GICV2M_MSI_IIDR 0xFCC static int gicv2m_attach(device_t dev) { struct gicv2m_softc *sc; uint32_t typer; int rid; sc = device_get_softc(dev); rid = 0; sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_mem == NULL) { device_printf(dev, "Unable to allocate resources\n"); return (ENXIO); } typer = bus_read_4(sc->sc_mem, GICV2M_MSI_TYPER); sc->sc_spi_start = MSI_TYPER_SPI_BASE(typer); sc->sc_spi_count = MSI_TYPER_SPI_COUNT(typer); device_printf(dev, "using spi %u to %u\n", sc->sc_spi_start, sc->sc_spi_start + sc->sc_spi_count - 1); mtx_init(&sc->sc_mutex, "GICv2m lock", "", MTX_DEF); arm_register_msi_pic(dev); return (0); } static int gicv2m_alloc_msix(device_t dev, device_t pci_dev, int *pirq) { struct arm_gic_softc *psc; struct gicv2m_softc *sc; uint32_t reg; int irq; psc = device_get_softc(device_get_parent(dev)); sc = device_get_softc(dev); mtx_lock(&sc->sc_mutex); /* Find an unused interrupt */ KASSERT(sc->sc_spi_offset < sc->sc_spi_count, ("No free SPIs")); irq = sc->sc_spi_start + sc->sc_spi_offset; sc->sc_spi_offset++; /* Interrupts need to be edge triggered, set this */ reg = gic_d_read_4(psc, GICD_ICFGR(irq >> 4)); reg |= (GICD_ICFGR_TRIG_EDGE | GICD_ICFGR_POL_HIGH) << ((irq & 0xf) * 2); gic_d_write_4(psc, GICD_ICFGR(irq >> 4), reg); *pirq = irq; mtx_unlock(&sc->sc_mutex); return (0); } static int gicv2m_alloc_msi(device_t dev, device_t pci_dev, int count, int *irqs) { struct arm_gic_softc *psc; struct gicv2m_softc *sc; uint32_t reg; int i, irq; psc = device_get_softc(device_get_parent(dev)); sc = device_get_softc(dev); mtx_lock(&sc->sc_mutex); KASSERT(sc->sc_spi_offset + count <= sc->sc_spi_count, ("No free SPIs for %d MSI interrupts", count)); /* Find an unused interrupt */ for (i = 0; i < count; i++) { irq = sc->sc_spi_start + sc->sc_spi_offset; sc->sc_spi_offset++; /* Interrupts need to be edge triggered, set this */ reg = gic_d_read_4(psc, GICD_ICFGR(irq >> 4)); reg |= (GICD_ICFGR_TRIG_EDGE | GICD_ICFGR_POL_HIGH) << ((irq & 0xf) * 2); gic_d_write_4(psc, GICD_ICFGR(irq >> 4), reg); irqs[i] = irq; } mtx_unlock(&sc->sc_mutex); return (0); } static int gicv2m_map_msi(device_t dev, device_t pci_dev, int irq, uint64_t *addr, uint32_t *data) { struct gicv2m_softc *sc = device_get_softc(dev); *addr = vtophys(rman_get_virtual(sc->sc_mem)) + 0x40; *data = irq; return (0); } static device_method_t arm_gicv2m_methods[] = { /* Device interface */ DEVMETHOD(device_attach, gicv2m_attach), /* MSI/MSI-X */ DEVMETHOD(pic_alloc_msix, gicv2m_alloc_msix), DEVMETHOD(pic_alloc_msi, gicv2m_alloc_msi), DEVMETHOD(pic_map_msi, gicv2m_map_msi), { 0, 0 } }; DEFINE_CLASS_0(gicv2m, arm_gicv2m_driver, arm_gicv2m_methods, sizeof(struct gicv2m_softc)); Index: 
head/sys/arm64/arm64/gic_v3.c =================================================================== --- head/sys/arm64/arm64/gic_v3.c (revision 295511) +++ head/sys/arm64/arm64/gic_v3.c (revision 295512) @@ -1,719 +1,720 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pic_if.h" #include "gic_v3_reg.h" #include "gic_v3_var.h" /* Device and PIC methods */ static void gic_v3_dispatch(device_t, struct trapframe *); static void gic_v3_eoi(device_t, u_int); static void gic_v3_mask_irq(device_t, u_int); static void gic_v3_unmask_irq(device_t, u_int); #ifdef SMP static void gic_v3_init_secondary(device_t); static void gic_v3_ipi_send(device_t, cpuset_t, u_int); #endif static device_method_t gic_v3_methods[] = { /* Device interface */ DEVMETHOD(device_detach, gic_v3_detach), /* PIC interface */ DEVMETHOD(pic_dispatch, gic_v3_dispatch), DEVMETHOD(pic_eoi, gic_v3_eoi), DEVMETHOD(pic_mask, gic_v3_mask_irq), DEVMETHOD(pic_unmask, gic_v3_unmask_irq), #ifdef SMP DEVMETHOD(pic_init_secondary, gic_v3_init_secondary), DEVMETHOD(pic_ipi_send, gic_v3_ipi_send), #endif /* End */ DEVMETHOD_END }; DEFINE_CLASS_0(gic, gic_v3_driver, gic_v3_methods, sizeof(struct gic_v3_softc)); /* * Driver-specific definitions. */ MALLOC_DEFINE(M_GIC_V3, "GICv3", GIC_V3_DEVSTR); /* * Helper functions and definitions. 
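 *
 * Many of the Distributor/Re-Distributor writes below follow a
 * write-then-wait idiom: the RWP (register write pending) flag stays set
 * in the Distributor or Re-Distributor control register while an
 * enable/disable or control write is still taking effect, so the driver
 * polls it through gic_v3_wait_for_rwp() before depending on the result.
 * A hedged sketch of the idiom, using the helpers declared below:
 *
 *	gic_d_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq));
 *	gic_v3_wait_for_rwp(sc, DIST);
 *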
*/ /* Destination registers, either Distributor or Re-Distributor */ enum gic_v3_xdist { DIST = 0, REDIST, }; /* Helper routines starting with gic_v3_ */ static int gic_v3_dist_init(struct gic_v3_softc *); static int gic_v3_redist_alloc(struct gic_v3_softc *); static int gic_v3_redist_find(struct gic_v3_softc *); static int gic_v3_redist_init(struct gic_v3_softc *); static int gic_v3_cpu_init(struct gic_v3_softc *); static void gic_v3_wait_for_rwp(struct gic_v3_softc *, enum gic_v3_xdist); /* A sequence of init functions for primary (boot) CPU */ typedef int (*gic_v3_initseq_t) (struct gic_v3_softc *); /* Primary CPU initialization sequence */ static gic_v3_initseq_t gic_v3_primary_init[] = { gic_v3_dist_init, gic_v3_redist_alloc, gic_v3_redist_init, gic_v3_cpu_init, NULL }; #ifdef SMP /* Secondary CPU initialization sequence */ static gic_v3_initseq_t gic_v3_secondary_init[] = { gic_v3_redist_init, gic_v3_cpu_init, NULL }; #endif /* * Device interface. */ int gic_v3_attach(device_t dev) { struct gic_v3_softc *sc; gic_v3_initseq_t *init_func; uint32_t typer; int rid; int err; size_t i; sc = device_get_softc(dev); sc->gic_registered = FALSE; sc->dev = dev; err = 0; /* Initialize mutex */ mtx_init(&sc->gic_mtx, "GICv3 lock", NULL, MTX_SPIN); /* * Allocate array of struct resource. * One entry for Distributor and all remaining for Re-Distributor. */ sc->gic_res = malloc( sizeof(*sc->gic_res) * (sc->gic_redists.nregions + 1), M_GIC_V3, M_WAITOK); /* Now allocate corresponding resources */ for (i = 0, rid = 0; i < (sc->gic_redists.nregions + 1); i++, rid++) { sc->gic_res[rid] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->gic_res[rid] == NULL) return (ENXIO); } /* * Distributor interface */ sc->gic_dist = sc->gic_res[0]; /* * Re-Dristributor interface */ /* Allocate space under region descriptions */ sc->gic_redists.regions = malloc( sizeof(*sc->gic_redists.regions) * sc->gic_redists.nregions, M_GIC_V3, M_WAITOK); /* Fill-up bus_space information for each region. */ for (i = 0, rid = 1; i < sc->gic_redists.nregions; i++, rid++) sc->gic_redists.regions[i] = sc->gic_res[rid]; /* Get the number of supported SPI interrupts */ typer = gic_d_read(sc, 4, GICD_TYPER); sc->gic_nirqs = GICD_TYPER_I_NUM(typer); if (sc->gic_nirqs > GIC_I_NUM_MAX) sc->gic_nirqs = GIC_I_NUM_MAX; /* Get the number of supported interrupt identifier bits */ sc->gic_idbits = GICD_TYPER_IDBITS(typer); if (bootverbose) { device_printf(dev, "SPIs: %u, IDs: %u\n", sc->gic_nirqs, (1 << sc->gic_idbits) - 1); } /* Train init sequence for boot CPU */ for (init_func = gic_v3_primary_init; *init_func != NULL; init_func++) { err = (*init_func)(sc); if (err != 0) return (err); } /* * Full success. * Now register PIC to the interrupts handling layer. */ arm_register_root_pic(dev, sc->gic_nirqs); sc->gic_registered = TRUE; return (0); } int gic_v3_detach(device_t dev) { struct gic_v3_softc *sc; size_t i; int rid; sc = device_get_softc(dev); if (device_is_attached(dev)) { /* * XXX: We should probably deregister PIC */ if (sc->gic_registered) panic("Trying to detach registered PIC"); } for (rid = 0; rid < (sc->gic_redists.nregions + 1); rid++) bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->gic_res[rid]); for (i = 0; i < mp_ncpus; i++) free(sc->gic_redists.pcpu[i], M_GIC_V3); free(sc->gic_res, M_GIC_V3); free(sc->gic_redists.regions, M_GIC_V3); return (0); } /* * PIC interface. 
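 *
 * The methods below split each operation by interrupt number: SGIs and
 * PPIs (0-31) are banked per CPU and live in that CPU's Re-Distributor
 * SGI frame (hence the GICR_SGI_BASE_SIZE offset added to the register
 * offset), SPIs (32-1019) live in the shared Distributor, and LPIs
 * (8192+) are delegated to the ITS code.  Note that, unlike the GICv2
 * driver, the GICD_ISENABLER()/GICD_ICENABLER() macros in gic_v3_reg.h
 * take the interrupt number itself and derive the register offset, so a
 * hedged sketch of masking a PPI on the current CPU is simply:
 *
 *	gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq),
 *	    GICD_I_MASK(irq));
 *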
*/ static void gic_v3_dispatch(device_t dev, struct trapframe *frame) { uint64_t active_irq; while (1) { if (CPU_MATCH_ERRATA_CAVIUM_THUNDER_1_1) { /* * Hardware: Cavium ThunderX * Chip revision: Pass 1.0 (early version) * Pass 1.1 (production) * ERRATUM: 22978, 23154 */ __asm __volatile( "nop;nop;nop;nop;nop;nop;nop;nop; \n" "mrs %0, ICC_IAR1_EL1 \n" "nop;nop;nop;nop; \n" "dsb sy \n" : "=&r" (active_irq)); } else { active_irq = gic_icc_read(IAR1); } if (__predict_false(active_irq == ICC_IAR1_EL1_SPUR)) break; if (__predict_true((active_irq >= GIC_FIRST_PPI && active_irq <= GIC_LAST_SPI) || active_irq >= GIC_FIRST_LPI)) { arm_dispatch_intr(active_irq, frame); continue; } if (active_irq <= GIC_LAST_SGI) { gic_icc_write(EOIR1, (uint64_t)active_irq); arm_dispatch_intr(active_irq, frame); continue; } } } static void gic_v3_eoi(device_t dev, u_int irq) { gic_icc_write(EOIR1, (uint64_t)irq); } static void gic_v3_mask_irq(device_t dev, u_int irq) { struct gic_v3_softc *sc; sc = device_get_softc(dev); if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ICENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, REDIST); } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */ gic_r_write(sc, 4, GICD_ICENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, DIST); } else if (irq >= GIC_FIRST_LPI) { /* LPIs */ lpi_mask_irq(dev, irq); } else panic("%s: Unsupported IRQ number %u", __func__, irq); } static void gic_v3_unmask_irq(device_t dev, u_int irq) { struct gic_v3_softc *sc; sc = device_get_softc(dev); if (irq <= GIC_LAST_PPI) { /* SGIs and PPIs in corresponding Re-Distributor */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_ISENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, REDIST); } else if (irq >= GIC_FIRST_SPI && irq <= GIC_LAST_SPI) { /* SPIs in distributor */ gic_d_write(sc, 4, GICD_ISENABLER(irq), GICD_I_MASK(irq)); gic_v3_wait_for_rwp(sc, DIST); } else if (irq >= GIC_FIRST_LPI) { /* LPIs */ lpi_unmask_irq(dev, irq); } else panic("%s: Unsupported IRQ number %u", __func__, irq); } #ifdef SMP static void gic_v3_init_secondary(device_t dev) { struct gic_v3_softc *sc; gic_v3_initseq_t *init_func; int err; sc = device_get_softc(dev); /* Train init sequence for boot CPU */ for (init_func = gic_v3_secondary_init; *init_func != NULL; init_func++) { err = (*init_func)(sc); if (err != 0) { device_printf(dev, "Could not initialize GIC for CPU%u\n", PCPU_GET(cpuid)); return; } } /* * Try to initialize ITS. * If there is no driver attached this routine will fail but that * does not mean failure here as only LPIs will not be functional * on the current CPU. */ if (its_init_cpu(NULL) != 0) { device_printf(dev, "Could not initialize ITS for CPU%u. " "No LPIs will arrive on this CPU\n", PCPU_GET(cpuid)); } /* * ARM64TODO: Unmask timer PPIs. To be removed when appropriate * mechanism is implemented. * Activate the timer interrupts: virtual (27), secure (29), * and non-secure (30). Use hardcoded values here as there * should be no defines for them. 
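 *
 * The gic_v3_ipi_send() routine below works within ICC_SGI1R_EL1's
 * addressing model: a single write can target at most 16 CPUs, all in the
 * same Aff3.Aff2.Aff1 cluster, selected by a 16-bit bitmap of Aff0 values.
 * That is why the loop saves the affinity of the first remaining CPU in
 * the set, sweeps the rest of the set for CPUs in the same cluster, and
 * only then issues one register write per cluster.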
*/ gic_v3_unmask_irq(dev, 27); gic_v3_unmask_irq(dev, 29); gic_v3_unmask_irq(dev, 30); } static void gic_v3_ipi_send(device_t dev, cpuset_t cpuset, u_int ipi) { u_int cpu; uint64_t aff, tlist; uint64_t val; uint64_t aff_mask; /* Set affinity mask to match level 3, 2 and 1 */ aff_mask = CPU_AFF1_MASK | CPU_AFF2_MASK | CPU_AFF3_MASK; /* Iterate through all CPUs in set */ while (!CPU_EMPTY(&cpuset)) { aff = tlist = 0; for (cpu = 0; cpu < mp_ncpus; cpu++) { /* Compose target list for single AFF3:AFF2:AFF1 set */ if (CPU_ISSET(cpu, &cpuset)) { if (!tlist) { /* * Save affinity of the first CPU to * send IPI to for later comparison. */ aff = CPU_AFFINITY(cpu); tlist |= (1UL << CPU_AFF0(aff)); CPU_CLR(cpu, &cpuset); } /* Check for same Affinity level 3, 2 and 1 */ if ((aff & aff_mask) == (CPU_AFFINITY(cpu) & aff_mask)) { tlist |= (1UL << CPU_AFF0(CPU_AFFINITY(cpu))); /* Clear CPU in cpuset from target list */ CPU_CLR(cpu, &cpuset); } } } if (tlist) { - KASSERT((tlist & ~GICI_SGI_TLIST_MASK) == 0, + KASSERT((tlist & ~ICC_SGI1R_EL1_TL_MASK) == 0, ("Target list too long for GICv3 IPI")); /* Send SGI to CPUs in target list */ val = tlist; - val |= (uint64_t)CPU_AFF3(aff) << GICI_SGI_AFF3_SHIFT; - val |= (uint64_t)CPU_AFF2(aff) << GICI_SGI_AFF2_SHIFT; - val |= (uint64_t)CPU_AFF1(aff) << GICI_SGI_AFF1_SHIFT; - val |= (uint64_t)(ipi & GICI_SGI_IPI_MASK) << GICI_SGI_IPI_SHIFT; + val |= (uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT; + val |= (uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT; + val |= (uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT; + val |= (uint64_t)(ipi & ICC_SGI1R_EL1_SGIID_MASK) << + ICC_SGI1R_EL1_SGIID_SHIFT; gic_icc_write(SGI1R, val); } } } #endif /* * Helper routines */ static void gic_v3_wait_for_rwp(struct gic_v3_softc *sc, enum gic_v3_xdist xdist) { struct resource *res; u_int cpuid; size_t us_left = 1000000; cpuid = PCPU_GET(cpuid); switch (xdist) { case DIST: res = sc->gic_dist; break; case REDIST: res = sc->gic_redists.pcpu[cpuid]; break; default: KASSERT(0, ("%s: Attempt to wait for unknown RWP", __func__)); return; } while ((bus_read_4(res, GICD_CTLR) & GICD_CTLR_RWP) != 0) { DELAY(1); if (us_left-- == 0) panic("GICD Register write pending for too long"); } } /* CPU interface. */ static __inline void gic_v3_cpu_priority(uint64_t mask) { /* Set prority mask */ gic_icc_write(PMR, mask & ICC_PMR_EL1_PRIO_MASK); } static int gic_v3_cpu_enable_sre(struct gic_v3_softc *sc) { uint64_t sre; u_int cpuid; cpuid = PCPU_GET(cpuid); /* * Set the SRE bit to enable access to GIC CPU interface * via system registers. */ sre = READ_SPECIALREG(icc_sre_el1); sre |= ICC_SRE_EL1_SRE; WRITE_SPECIALREG(icc_sre_el1, sre); isb(); /* * Now ensure that the bit is set. */ sre = READ_SPECIALREG(icc_sre_el1); if ((sre & ICC_SRE_EL1_SRE) == 0) { /* We are done. 
This was disabled in EL2 */ device_printf(sc->dev, "ERROR: CPU%u cannot enable CPU interface " "via system registers\n", cpuid); return (ENXIO); } else if (bootverbose) { device_printf(sc->dev, "CPU%u enabled CPU interface via system registers\n", cpuid); } return (0); } static int gic_v3_cpu_init(struct gic_v3_softc *sc) { int err; /* Enable access to CPU interface via system registers */ err = gic_v3_cpu_enable_sre(sc); if (err != 0) return (err); /* Priority mask to minimum - accept all interrupts */ gic_v3_cpu_priority(GIC_PRIORITY_MIN); /* Disable EOI mode */ gic_icc_clear(CTLR, ICC_CTLR_EL1_EOIMODE); /* Enable group 1 (insecure) interrups */ gic_icc_set(IGRPEN1, ICC_IGRPEN0_EL1_EN); return (0); } /* Distributor */ static int gic_v3_dist_init(struct gic_v3_softc *sc) { uint64_t aff; u_int i; /* * 1. Disable the Distributor */ gic_d_write(sc, 4, GICD_CTLR, 0); gic_v3_wait_for_rwp(sc, DIST); /* * 2. Configure the Distributor */ /* Set all global interrupts to be level triggered, active low. */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ICFGRn) gic_d_write(sc, 4, GICD_ICFGR(i), 0x00000000); /* Set priority to all shared interrupts */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_IPRIORITYn) { /* Set highest priority */ gic_d_write(sc, 4, GICD_IPRIORITYR(i), GIC_PRIORITY_MAX); } /* * Disable all interrupts. Leave PPI and SGIs as they are enabled in * Re-Distributor registers. */ for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i += GICD_I_PER_ISENABLERn) gic_d_write(sc, 4, GICD_ICENABLER(i), 0xFFFFFFFF); gic_v3_wait_for_rwp(sc, DIST); /* * 3. Enable Distributor */ /* Enable Distributor with ARE, Group 1 */ gic_d_write(sc, 4, GICD_CTLR, GICD_CTLR_ARE_NS | GICD_CTLR_G1A | GICD_CTLR_G1); /* * 4. Route all interrupts to boot CPU. 
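 *
 * Each SPI has a 64-bit GICD_IROUTER register holding the target CPU's
 * Aff3.Aff2.Aff1.Aff0 affinity fields (with bit 31, IRM, instead selecting
 * 1-of-N routing to any participating CPU).  CPU_AFFINITY() provides the
 * affinity fields in that same layout, which is why the loop below can
 * write it directly; a hedged sketch of pointing one SPI at another CPU
 * (irq and cpuid are illustrative):
 *
 *	gic_d_write(sc, 4, GICD_IROUTER(irq), CPU_AFFINITY(cpuid));
 *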
*/ aff = CPU_AFFINITY(PCPU_GET(cpuid)); for (i = GIC_FIRST_SPI; i < sc->gic_nirqs; i++) gic_d_write(sc, 4, GICD_IROUTER(i), aff); return (0); } /* Re-Distributor */ static int gic_v3_redist_alloc(struct gic_v3_softc *sc) { u_int cpuid; /* Allocate struct resource for all CPU's Re-Distributor registers */ for (cpuid = 0; cpuid < mp_ncpus; cpuid++) if (CPU_ISSET(cpuid, &all_cpus) != 0) sc->gic_redists.pcpu[cpuid] = malloc(sizeof(*sc->gic_redists.pcpu[0]), M_GIC_V3, M_WAITOK); else sc->gic_redists.pcpu[cpuid] = NULL; return (0); } static int gic_v3_redist_find(struct gic_v3_softc *sc) { struct resource r_res; bus_space_handle_t r_bsh; uint64_t aff; uint64_t typer; uint32_t pidr2; u_int cpuid; size_t i; cpuid = PCPU_GET(cpuid); aff = CPU_AFFINITY(cpuid); /* Affinity in format for comparison with typer */ aff = (CPU_AFF3(aff) << 24) | (CPU_AFF2(aff) << 16) | (CPU_AFF1(aff) << 8) | CPU_AFF0(aff); if (bootverbose) { device_printf(sc->dev, "Start searching for Re-Distributor\n"); } /* Iterate through Re-Distributor regions */ for (i = 0; i < sc->gic_redists.nregions; i++) { /* Take a copy of the region's resource */ r_res = *sc->gic_redists.regions[i]; r_bsh = rman_get_bushandle(&r_res); pidr2 = bus_read_4(&r_res, GICR_PIDR2); switch (pidr2 & GICR_PIDR2_ARCH_MASK) { case GICR_PIDR2_ARCH_GICv3: /* fall through */ case GICR_PIDR2_ARCH_GICv4: break; default: device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid); return (ENODEV); } do { typer = bus_read_8(&r_res, GICR_TYPER); if ((typer >> GICR_TYPER_AFF_SHIFT) == aff) { KASSERT(sc->gic_redists.pcpu[cpuid] != NULL, ("Invalid pointer to per-CPU redistributor")); /* Copy res contents to its final destination */ *sc->gic_redists.pcpu[cpuid] = r_res; if (bootverbose) { device_printf(sc->dev, "CPU%u Re-Distributor has been found\n", cpuid); } return (0); } r_bsh += (GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE); if ((typer & GICR_TYPER_VLPIS) != 0) { r_bsh += (GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE); } rman_set_bushandle(&r_res, r_bsh); } while ((typer & GICR_TYPER_LAST) == 0); } device_printf(sc->dev, "No Re-Distributor found for CPU%u\n", cpuid); return (ENXIO); } static int gic_v3_redist_wake(struct gic_v3_softc *sc) { uint32_t waker; size_t us_left = 1000000; waker = gic_r_read(sc, 4, GICR_WAKER); /* Wake up Re-Distributor for this CPU */ waker &= ~GICR_WAKER_PS; gic_r_write(sc, 4, GICR_WAKER, waker); /* * When clearing ProcessorSleep bit it is required to wait for * ChildrenAsleep to become zero following the processor power-on. 
*/ while ((gic_r_read(sc, 4, GICR_WAKER) & GICR_WAKER_CA) != 0) { DELAY(1); if (us_left-- == 0) { panic("Could not wake Re-Distributor for CPU%u", PCPU_GET(cpuid)); } } if (bootverbose) { device_printf(sc->dev, "CPU%u Re-Distributor woke up\n", PCPU_GET(cpuid)); } return (0); } static int gic_v3_redist_init(struct gic_v3_softc *sc) { int err; size_t i; err = gic_v3_redist_find(sc); if (err != 0) return (err); err = gic_v3_redist_wake(sc); if (err != 0) return (err); /* Disable SPIs */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ICENABLER0, GICR_I_ENABLER_PPI_MASK); /* Enable SGIs */ gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICR_ISENABLER0, GICR_I_ENABLER_SGI_MASK); /* Set priority for SGIs and PPIs */ for (i = 0; i <= GIC_LAST_PPI; i += GICR_I_PER_IPRIORITYn) { gic_r_write(sc, 4, GICR_SGI_BASE_SIZE + GICD_IPRIORITYR(i), GIC_PRIORITY_MAX); } gic_v3_wait_for_rwp(sc, REDIST); return (0); } Index: head/sys/arm64/arm64/gic_v3_reg.h =================================================================== --- head/sys/arm64/arm64/gic_v3_reg.h (revision 295511) +++ head/sys/arm64/arm64/gic_v3_reg.h (revision 295512) @@ -1,419 +1,413 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under * the sponsorship of the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef _GIC_V3_REG_H_ #define _GIC_V3_REG_H_ /* * Maximum number of interrupts * supported by GIC (including SGIs, PPIs and SPIs) */ #define GIC_I_NUM_MAX (1020) /* * Priority MAX/MIN values */ #define GIC_PRIORITY_MAX (0x00UL) /* Upper value is determined by LPI max priority */ #define GIC_PRIORITY_MIN (0xFCUL) /* Numbers for software generated interrupts */ #define GIC_FIRST_SGI (0) #define GIC_LAST_SGI (15) /* Numbers for private peripheral interrupts */ #define GIC_FIRST_PPI (16) #define GIC_LAST_PPI (31) /* Numbers for spared peripheral interrupts */ #define GIC_FIRST_SPI (32) #define GIC_LAST_SPI (1019) /* Numbers for local peripheral interrupts */ #define GIC_FIRST_LPI (8192) /* * Registers (v2/v3) */ #define GICD_CTLR (0x0000) #define GICD_CTLR_G1 (1 << 0) #define GICD_CTLR_G1A (1 << 1) #define GICD_CTLR_ARE_NS (1 << 4) #define GICD_CTLR_RWP (1 << 31) #define GICD_TYPER (0x0004) #define GICD_TYPER_IDBITS(n) ((((n) >> 19) & 0x1F) + 1) #define GICD_TYPER_I_NUM(n) ((((n) & 0xF1) + 1) * 32) #define GICD_ISENABLER(n) (0x0100 + (((n) >> 5) * 4)) #define GICD_I_PER_ISENABLERn (32) #define GICD_ICENABLER(n) (0x0180 + (((n) >> 5) * 4)) #define GICD_IPRIORITYR(n) (0x0400 + (((n) >> 2) * 4)) #define GICD_I_PER_IPRIORITYn (4) #define GICD_I_MASK(n) (1 << ((n) % 32)) #define GICD_ICFGR(n) (0x0C00 + (((n) >> 4) * 4)) /* First bit is a polarity bit (0 - low, 1 - high) */ #define GICD_ICFGR_POL_LOW (0 << 0) #define GICD_ICFGR_POL_HIGH (1 << 0) #define GICD_ICFGR_POL_MASK (0x1) /* Second bit is a trigger bit (0 - level, 1 - edge) */ #define GICD_ICFGR_TRIG_LVL (0 << 1) #define GICD_ICFGR_TRIG_EDGE (1 << 1) #define GICD_ICFGR_TRIG_MASK (0x2) #define GICD_I_PER_ICFGRn (16) /* * Registers (v3) */ #define GICD_IROUTER(n) (0x6000 + ((n) * 8)) #define GICD_PIDR2 (0xFFE8) #define GICR_PIDR2_ARCH_MASK (0xF0) #define GICR_PIDR2_ARCH_GICv3 (0x30) #define GICR_PIDR2_ARCH_GICv4 (0x40) /* Redistributor registers */ #define GICR_CTLR GICD_CTLR #define GICR_CTLR_LPI_ENABLE (1 << 0) #define GICR_PIDR2 GICD_PIDR2 #define GICR_TYPER (0x0008) #define GICR_TYPER_PLPIS (1 << 0) #define GICR_TYPER_VLPIS (1 << 1) #define GICR_TYPER_LAST (1 << 4) #define GICR_TYPER_CPUNUM_SHIFT (8) #define GICR_TYPER_CPUNUM_MASK (0xFFFUL << GICR_TYPER_CPUNUM_SHIFT) #define GICR_TYPER_CPUNUM(x) \ (((x) & GICR_TYPER_CPUNUM_MASK) >> GICR_TYPER_CPUNUM_SHIFT) #define GICR_TYPER_AFF_SHIFT (32) #define GICR_WAKER (0x0014) #define GICR_WAKER_PS (1 << 1) /* Processor sleep */ #define GICR_WAKER_CA (1 << 2) /* Children asleep */ #define GICR_PROPBASER (0x0070) #define GICR_PROPBASER_IDBITS_MASK 0x1FUL /* * Cacheability * 0x0 - Device-nGnRnE * 0x1 - Normal Inner Non-cacheable * 0x2 - Normal Inner Read-allocate, Write-through * 0x3 - Normal Inner Read-allocate, Write-back * 0x4 - Normal Inner Write-allocate, Write-through * 0x5 - Normal Inner Write-allocate, Write-back * 0x6 - Normal Inner Read-allocate, Write-allocate, Write-through * 0x7 - Normal Inner Read-allocate, Write-allocate, Write-back */ #define GICR_PROPBASER_CACHE_SHIFT 7 #define GICR_PROPBASER_CACHE_DnGnRnE 0x0UL #define GICR_PROPBASER_CACHE_NIN 0x1UL #define GICR_PROPBASER_CACHE_NIRAWT 0x2UL #define GICR_PROPBASER_CACHE_NIRAWB 0x3UL #define GICR_PROPBASER_CACHE_NIWAWT 0x4UL #define GICR_PROPBASER_CACHE_NIWAWB 0x5UL #define GICR_PROPBASER_CACHE_NIRAWAWT 0x6UL #define GICR_PROPBASER_CACHE_NIRAWAWB 0x7UL /* * Shareability * 0x0 - Non-shareable * 0x1 - Inner-shareable * 0x2 - Outer-shareable * 0x3 - Reserved. 
Threated as 0x0 */ #define GICR_PROPBASER_SHARE_SHIFT 10 #define GICR_PROPBASER_SHARE_NS 0x0UL #define GICR_PROPBASER_SHARE_IS 0x1UL #define GICR_PROPBASER_SHARE_OS 0x2UL #define GICR_PROPBASER_SHARE_RES 0x3UL #define GICR_PROPBASER_SHARE_MASK \ (0x3UL << GICR_PROPBASER_SHARE_SHIFT) #define GICR_PENDBASER (0x0078) /* * Cacheability * 0x0 - Device-nGnRnE * 0x1 - Normal Inner Non-cacheable * 0x2 - Normal Inner Read-allocate, Write-through * 0x3 - Normal Inner Read-allocate, Write-back * 0x4 - Normal Inner Write-allocate, Write-through * 0x5 - Normal Inner Write-allocate, Write-back * 0x6 - Normal Inner Read-allocate, Write-allocate, Write-through * 0x7 - Normal Inner Read-allocate, Write-allocate, Write-back */ #define GICR_PENDBASER_CACHE_SHIFT 7 #define GICR_PENDBASER_CACHE_DnGnRnE 0x0UL #define GICR_PENDBASER_CACHE_NIN 0x1UL #define GICR_PENDBASER_CACHE_NIRAWT 0x2UL #define GICR_PENDBASER_CACHE_NIRAWB 0x3UL #define GICR_PENDBASER_CACHE_NIWAWT 0x4UL #define GICR_PENDBASER_CACHE_NIWAWB 0x5UL #define GICR_PENDBASER_CACHE_NIRAWAWT 0x6UL #define GICR_PENDBASER_CACHE_NIRAWAWB 0x7UL /* * Shareability * 0x0 - Non-shareable * 0x1 - Inner-shareable * 0x2 - Outer-shareable * 0x3 - Reserved. Threated as 0x0 */ #define GICR_PENDBASER_SHARE_SHIFT 10 #define GICR_PENDBASER_SHARE_NS 0x0UL #define GICR_PENDBASER_SHARE_IS 0x1UL #define GICR_PENDBASER_SHARE_OS 0x2UL #define GICR_PENDBASER_SHARE_RES 0x3UL #define GICR_PENDBASER_SHARE_MASK \ (0x3UL << GICR_PENDBASER_SHARE_SHIFT) /* Re-distributor registers for SGIs and PPIs */ #define GICR_RD_BASE_SIZE PAGE_SIZE_64K #define GICR_SGI_BASE_SIZE PAGE_SIZE_64K #define GICR_VLPI_BASE_SIZE PAGE_SIZE_64K #define GICR_RESERVED_SIZE PAGE_SIZE_64K #define GICR_ISENABLER0 (0x0100) #define GICR_ICENABLER0 (0x0180) #define GICR_I_ENABLER_SGI_MASK (0x0000FFFF) #define GICR_I_ENABLER_PPI_MASK (0xFFFF0000) #define GICR_I_PER_IPRIORITYn (GICD_I_PER_IPRIORITYn) /* ITS registers */ #define GITS_PIDR2 GICR_PIDR2 #define GITS_PIDR2_ARCH_MASK GICR_PIDR2_ARCH_MASK #define GITS_PIDR2_ARCH_GICv3 GICR_PIDR2_ARCH_GICv3 #define GITS_PIDR2_ARCH_GICv4 GICR_PIDR2_ARCH_GICv4 #define GITS_CTLR (0x0000) #define GITS_CTLR_EN (1 << 0) #define GITS_CBASER (0x0080) #define GITS_CBASER_VALID (1UL << 63) /* * Cacheability * 0x0 - Device-nGnRnE * 0x1 - Normal Inner Non-cacheable * 0x2 - Normal Inner Read-allocate, Write-through * 0x3 - Normal Inner Read-allocate, Write-back * 0x4 - Normal Inner Write-allocate, Write-through * 0x5 - Normal Inner Write-allocate, Write-back * 0x6 - Normal Inner Read-allocate, Write-allocate, Write-through * 0x7 - Normal Inner Read-allocate, Write-allocate, Write-back */ #define GITS_CBASER_CACHE_SHIFT 59 #define GITS_CBASER_CACHE_DnGnRnE 0x0UL #define GITS_CBASER_CACHE_NIN 0x1UL #define GITS_CBASER_CACHE_NIRAWT 0x2UL #define GITS_CBASER_CACHE_NIRAWB 0x3UL #define GITS_CBASER_CACHE_NIWAWT 0x4UL #define GITS_CBASER_CACHE_NIWAWB 0x5UL #define GITS_CBASER_CACHE_NIRAWAWT 0x6UL #define GITS_CBASER_CACHE_NIRAWAWB 0x7UL #define GITS_CBASER_CACHE_MASK (0x7UL << GITS_CBASER_TYPE_SHIFT) /* * Shareability * 0x0 - Non-shareable * 0x1 - Inner-shareable * 0x2 - Outer-shareable * 0x3 - Reserved. 
Threated as 0x0 */ #define GITS_CBASER_SHARE_SHIFT 10 #define GITS_CBASER_SHARE_NS 0x0UL #define GITS_CBASER_SHARE_IS 0x1UL #define GITS_CBASER_SHARE_OS 0x2UL #define GITS_CBASER_SHARE_RES 0x3UL #define GITS_CBASER_SHARE_MASK \ (0x3UL << GITS_CBASER_SHARE_SHIFT) #define GITS_CBASER_PA_SHIFT 12 #define GITS_CBASER_PA_MASK (0xFFFFFFFFFUL << GITS_CBASER_PA_SHIFT) #define GITS_CWRITER (0x0088) #define GITS_CREADR (0x0090) #define GITS_BASER_BASE (0x0100) #define GITS_BASER(x) (GITS_BASER_BASE + (x) * 8) #define GITS_BASER_VALID (1UL << 63) #define GITS_BASER_TYPE_SHIFT 56 #define GITS_BASER_TYPE(x) \ (((x) & GITS_BASER_TYPE_MASK) >> GITS_BASER_TYPE_SHIFT) #define GITS_BASER_TYPE_UNIMPL 0x0UL /* Unimplemented */ #define GITS_BASER_TYPE_DEV 0x1UL /* Devices */ #define GITS_BASER_TYPE_VP 0x2UL /* Virtual Processors */ #define GITS_BASER_TYPE_PP 0x3UL /* Physical Processors */ #define GITS_BASER_TYPE_IC 0x4UL /* Interrupt Collections */ #define GITS_BASER_TYPE_RES5 0x5UL /* Reserved */ #define GITS_BASER_TYPE_RES6 0x6UL /* Reserved */ #define GITS_BASER_TYPE_RES7 0x7UL /* Reserved */ #define GITS_BASER_TYPE_MASK (0x7UL << GITS_BASER_TYPE_SHIFT) /* * Cacheability * 0x0 - Non-cacheable, non-bufferable * 0x1 - Non-cacheable * 0x2 - Read-allocate, Write-through * 0x3 - Read-allocate, Write-back * 0x4 - Write-allocate, Write-through * 0x5 - Write-allocate, Write-back * 0x6 - Read-allocate, Write-allocate, Write-through * 0x7 - Read-allocate, Write-allocate, Write-back */ #define GITS_BASER_CACHE_SHIFT 59 #define GITS_BASER_CACHE_NCNB 0x0UL #define GITS_BASER_CACHE_NC 0x1UL #define GITS_BASER_CACHE_RAWT 0x2UL #define GITS_BASER_CACHE_RAWB 0x3UL #define GITS_BASER_CACHE_WAWT 0x4UL #define GITS_BASER_CACHE_WAWB 0x5UL #define GITS_BASER_CACHE_RAWAWT 0x6UL #define GITS_BASER_CACHE_RAWAWB 0x7UL #define GITS_BASER_CACHE_MASK (0x7UL << GITS_BASER_CACHE_SHIFT) #define GITS_BASER_ESIZE_SHIFT 48 #define GITS_BASER_ESIZE_MASK (0x1FUL << GITS_BASER_ESIZE_SHIFT) #define GITS_BASER_ESIZE(x) \ ((((x) & GITS_BASER_ESIZE_MASK) >> GITS_BASER_ESIZE_SHIFT) + 1) #define GITS_BASER_PA_SHIFT 12 #define GITS_BASER_PA_MASK (0xFFFFFFFFFUL << GITS_BASER_PA_SHIFT) /* * Shareability * 0x0 - Non-shareable * 0x1 - Inner-shareable * 0x2 - Outer-shareable * 0x3 - Reserved. 
Threated as 0x0 */ #define GITS_BASER_SHARE_SHIFT 10 #define GITS_BASER_SHARE_NS 0x0UL #define GITS_BASER_SHARE_IS 0x1UL #define GITS_BASER_SHARE_OS 0x2UL #define GITS_BASER_SHARE_RES 0x3UL #define GITS_BASER_SHARE_MASK (0x3UL << GITS_BASER_SHARE_SHIFT) #define GITS_BASER_PSZ_SHIFT 8 #define GITS_BASER_PSZ_4K 0x0UL #define GITS_BASER_PSZ_16K 0x1UL #define GITS_BASER_PSZ_64K 0x2UL #define GITS_BASER_PSZ_MASK (0x3UL << GITS_BASER_PSZ_SHIFT) #define GITS_BASER_SIZE_MASK 0xFFUL #define GITS_BASER_NUM 8 #define GITS_TYPER (0x0008) #define GITS_TYPER_PTA (1UL << 19) #define GITS_TYPER_DEVB_SHIFT 13 #define GITS_TYPER_DEVB_MASK (0x1FUL << GITS_TYPER_DEVB_SHIFT) /* Number of device identifiers implemented */ #define GITS_TYPER_DEVB(x) \ ((((x) & GITS_TYPER_DEVB_MASK) >> GITS_TYPER_DEVB_SHIFT) + 1) #define GITS_TYPER_ITTES_SHIFT 4 #define GITS_TYPER_ITTES_MASK (0xFUL << GITS_TYPER_ITTES_SHIFT) /* Number of bytes per ITT Entry */ #define GITS_TYPER_ITTES(x) \ ((((x) & GITS_TYPER_ITTES_MASK) >> GITS_TYPER_ITTES_SHIFT) + 1) #define GITS_TRANSLATER (0x10040) /* * LPI related */ #define LPI_CONF_PRIO_MASK (0xFC) #define LPI_CONF_GROUP1 (1 << 1) #define LPI_CONF_ENABLE (1 << 0) /* * CPU interface */ -#define GICI_SGI_TLIST_MASK (0xffffUL) -#define GICI_SGI_AFF1_SHIFT (16UL) -#define GICI_SGI_AFF2_SHIFT (32UL) -#define GICI_SGI_AFF3_SHIFT (48UL) -#define GICI_SGI_IPI_MASK (0xfUL) -#define GICI_SGI_IPI_SHIFT (24UL) /* * Registers list (ICC_xyz_EL1): * * PMR - Priority Mask Register * * interrupts of priority higher than specified * in this mask will be signalled to the CPU. * (0xff - lowest possible prio., 0x00 - highest prio.) * * CTLR - Control Register * * controls behavior of the CPU interface and displays * implemented features. * * IGRPEN1 - Interrupt Group 1 Enable Register * * IAR1 - Interrupt Acknowledge Register Group 1 * * contains number of the highest priority pending * interrupt from the Group 1. * * EOIR1 - End of Interrupt Register Group 1 * * Writes inform CPU interface about completed Group 1 * interrupts processing. */ #define gic_icc_write(reg, val) \ do { \ WRITE_SPECIALREG(ICC_ ##reg ##_EL1, val); \ isb(); \ } while (0) #define gic_icc_read(reg) \ ({ \ uint64_t val; \ \ val = READ_SPECIALREG(ICC_ ##reg ##_EL1); \ (val); \ }) #define gic_icc_set(reg, mask) \ do { \ uint64_t val; \ val = gic_icc_read(reg); \ val |= (mask); \ gic_icc_write(reg, val); \ } while (0) #define gic_icc_clear(reg, mask) \ do { \ uint64_t val; \ val = gic_icc_read(reg); \ val &= ~(mask); \ gic_icc_write(reg, val); \ } while (0) #endif /* _GIC_V3_REG_H_ */ Index: head/sys/arm64/include/armreg.h =================================================================== --- head/sys/arm64/include/armreg.h (revision 295511) +++ head/sys/arm64/include/armreg.h (revision 295512) @@ -1,395 +1,404 @@ /*- * Copyright (c) 2013, 2014 Andrew Turner * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_ARMREG_H_ #define _MACHINE_ARMREG_H_ #define INSN_SIZE 4 #define READ_SPECIALREG(reg) \ ({ uint64_t val; \ __asm __volatile("mrs %0, " __STRING(reg) : "=&r" (val)); \ val; \ }) #define WRITE_SPECIALREG(reg, val) \ __asm __volatile("msr " __STRING(reg) ", %0" : : "r"((uint64_t)val)) /* CNTHCTL_EL2 - Counter-timer Hypervisor Control register */ #define CNTHCTL_EVNTI_MASK (0xf << 4) /* Bit to trigger event stream */ #define CNTHCTL_EVNTDIR (1 << 3) /* Control transition trigger bit */ #define CNTHCTL_EVNTEN (1 << 2) /* Enable event stream */ #define CNTHCTL_EL1PCEN (1 << 1) /* Allow EL0/1 physical timer access */ #define CNTHCTL_EL1PCTEN (1 << 0) /*Allow EL0/1 physical counter access*/ /* CPACR_EL1 */ #define CPACR_FPEN_MASK (0x3 << 20) #define CPACR_FPEN_TRAP_ALL1 (0x0 << 20) /* Traps from EL0 and EL1 */ #define CPACR_FPEN_TRAP_EL0 (0x1 << 20) /* Traps from EL0 */ #define CPACR_FPEN_TRAP_ALL2 (0x2 << 20) /* Traps from EL0 and EL1 */ #define CPACR_FPEN_TRAP_NONE (0x3 << 20) /* No traps */ #define CPACR_TTA (0x1 << 28) /* CTR_EL0 - Cache Type Register */ #define CTR_DLINE_SHIFT 16 #define CTR_DLINE_MASK (0xf << CTR_DLINE_SHIFT) #define CTR_DLINE_SIZE(reg) (((reg) & CTR_DLINE_MASK) >> CTR_DLINE_SHIFT) #define CTR_ILINE_SHIFT 0 #define CTR_ILINE_MASK (0xf << CTR_ILINE_SHIFT) #define CTR_ILINE_SIZE(reg) (((reg) & CTR_ILINE_MASK) >> CTR_ILINE_SHIFT) /* ESR_ELx */ #define ESR_ELx_ISS_MASK 0x00ffffff #define ISS_INSN_FnV (0x01 << 10) #define ISS_INSN_EA (0x01 << 9) #define ISS_INSN_S1PTW (0x01 << 7) #define ISS_INSN_IFSC_MASK (0x1f << 0) #define ISS_DATA_ISV (0x01 << 24) #define ISS_DATA_SAS_MASK (0x03 << 22) #define ISS_DATA_SSE (0x01 << 21) #define ISS_DATA_SRT_MASK (0x1f << 16) #define ISS_DATA_SF (0x01 << 15) #define ISS_DATA_AR (0x01 << 14) #define ISS_DATA_FnV (0x01 << 10) #define ISS_DATa_EA (0x01 << 9) #define ISS_DATa_CM (0x01 << 8) #define ISS_INSN_S1PTW (0x01 << 7) #define ISS_DATa_WnR (0x01 << 6) #define ISS_DATA_DFSC_MASK (0x1f << 0) #define ESR_ELx_IL (0x01 << 25) #define ESR_ELx_EC_SHIFT 26 #define ESR_ELx_EC_MASK (0x3f << 26) #define ESR_ELx_EXCEPTION(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT) #define EXCP_UNKNOWN 0x00 /* Unkwn exception */ #define EXCP_FP_SIMD 0x07 /* VFP/SIMD trap */ #define EXCP_ILL_STATE 0x0e /* Illegal execution state */ #define EXCP_SVC 0x15 /* SVC trap */ #define EXCP_MSR 0x18 /* MSR/MRS trap */ #define EXCP_INSN_ABORT_L 0x20 /* Instruction abort, from lower EL */ #define EXCP_INSN_ABORT 0x21 /* Instruction abort, from same EL */ #define EXCP_PC_ALIGN 0x22 /* PC alignment fault */ #define EXCP_DATA_ABORT_L 0x24 /* Data abort, from lower EL */ #define EXCP_DATA_ABORT 0x25 /* Data abort, from same EL */ #define EXCP_SP_ALIGN 0x26 /* SP slignment fault */ #define 
EXCP_TRAP_FP 0x2c /* Trapped FP exception */ #define EXCP_SERROR 0x2f /* SError interrupt */ #define EXCP_SOFTSTP_EL0 0x32 /* Software Step, from lower EL */ #define EXCP_SOFTSTP_EL1 0x33 /* Software Step, from same EL */ #define EXCP_WATCHPT_EL1 0x35 /* Watchpoint, from same EL */ #define EXCP_BRK 0x3c /* Breakpoint */ /* ICC_CTLR_EL1 */ #define ICC_CTLR_EL1_EOIMODE (1U << 1) /* ICC_IAR1_EL1 */ #define ICC_IAR1_EL1_SPUR (0x03ff) /* ICC_IGRPEN0_EL1 */ #define ICC_IGRPEN0_EL1_EN (1U << 0) /* ICC_PMR_EL1 */ #define ICC_PMR_EL1_PRIO_MASK (0xFFUL) +/* ICC_SGI1R_EL1 */ +#define ICC_SGI1R_EL1_TL_MASK 0xffffUL +#define ICC_SGI1R_EL1_AFF1_SHIFT 16 +#define ICC_SGI1R_EL1_SGIID_SHIFT 24 +#define ICC_SGI1R_EL1_AFF2_SHIFT 32 +#define ICC_SGI1R_EL1_AFF3_SHIFT 48 +#define ICC_SGI1R_EL1_SGIID_MASK 0xfUL +#define ICC_SGI1R_EL1_IRM (0x1UL << 40) + /* ICC_SRE_EL1 */ #define ICC_SRE_EL1_SRE (1U << 0) /* ICC_SRE_EL2 */ #define ICC_SRE_EL2_EN (1U << 3) /* ID_AA64DFR0_EL1 */ #define ID_AA64DFR0_MASK 0xf0f0ffff #define ID_AA64DFR0_DEBUG_VER_SHIFT 0 #define ID_AA64DFR0_DEBUG_VER_MASK (0xf << ID_AA64DFR0_DEBUG_VER_SHIFT) #define ID_AA64DFR0_DEBUG_VER(x) ((x) & ID_AA64DFR0_DEBUG_VER_MASK) #define ID_AA64DFR0_DEBUG_VER_8 (0x6 << ID_AA64DFR0_DEBUG_VER_SHIFT) #define ID_AA64DFR0_TRACE_VER_SHIFT 4 #define ID_AA64DFR0_TRACE_VER_MASK (0xf << ID_AA64DFR0_TRACE_VER_SHIFT) #define ID_AA64DFR0_TRACE_VER(x) ((x) & ID_AA64DFR0_TRACE_VER_MASK) #define ID_AA64DFR0_TRACE_VER_NONE (0x0 << ID_AA64DFR0_TRACE_VER_SHIFT) #define ID_AA64DFR0_TRACE_VER_IMPL (0x1 << ID_AA64DFR0_TRACE_VER_SHIFT) #define ID_AA64DFR0_PMU_VER_SHIFT 8 #define ID_AA64DFR0_PMU_VER_MASK (0xf << ID_AA64DFR0_PMU_VER_SHIFT) #define ID_AA64DFR0_PMU_VER(x) ((x) & ID_AA64DFR0_PMU_VER_MASK) #define ID_AA64DFR0_PMU_VER_NONE (0x0 << ID_AA64DFR0_PMU_VER_SHIFT) #define ID_AA64DFR0_PMU_VER_3 (0x1 << ID_AA64DFR0_PMU_VER_SHIFT) #define ID_AA64DFR0_PMU_VER_IMPL (0xf << ID_AA64DFR0_PMU_VER_SHIFT) #define ID_AA64DFR0_BRPS_SHIFT 12 #define ID_AA64DFR0_BRPS_MASK (0xf << ID_AA64DFR0_BRPS_SHIFT) #define ID_AA64DFR0_BRPS(x) \ ((((x) >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) + 1) #define ID_AA64DFR0_WRPS_SHIFT 20 #define ID_AA64DFR0_WRPS_MASK (0xf << ID_AA64DFR0_WRPS_SHIFT) #define ID_AA64DFR0_WRPS(x) \ ((((x) >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) + 1) #define ID_AA64DFR0_CTX_CMPS_SHIFT 28 #define ID_AA64DFR0_CTX_CMPS_MASK (0xf << ID_AA64DFR0_CTX_CMPS_SHIFT) #define ID_AA64DFR0_CTX_CMPS(x) \ ((((x) >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) + 1) /* ID_AA64ISAR0_EL1 */ #define ID_AA64ISAR0_MASK 0x000ffff0 #define ID_AA64ISAR0_AES_SHIFT 4 #define ID_AA64ISAR0_AES_MASK (0xf << ID_AA64ISAR0_AES_SHIFT) #define ID_AA64ISAR0_AES(x) ((x) & ID_AA64ISAR0_AES_MASK) #define ID_AA64ISAR0_AES_NONE (0x0 << ID_AA64ISAR0_AES_SHIFT) #define ID_AA64ISAR0_AES_BASE (0x1 << ID_AA64ISAR0_AES_SHIFT) #define ID_AA64ISAR0_AES_PMULL (0x2 << ID_AA64ISAR0_AES_SHIFT) #define ID_AA64ISAR0_SHA1_SHIFT 8 #define ID_AA64ISAR0_SHA1_MASK (0xf << ID_AA64ISAR0_SHA1_SHIFT) #define ID_AA64ISAR0_SHA1(x) ((x) & ID_AA64ISAR0_SHA1_MASK) #define ID_AA64ISAR0_SHA1_NONE (0x0 << ID_AA64ISAR0_SHA1_SHIFT) #define ID_AA64ISAR0_SHA1_BASE (0x1 << ID_AA64ISAR0_SHA1_SHIFT) #define ID_AA64ISAR0_SHA2_SHIFT 12 #define ID_AA64ISAR0_SHA2_MASK (0xf << ID_AA64ISAR0_SHA2_SHIFT) #define ID_AA64ISAR0_SHA2(x) ((x) & ID_AA64ISAR0_SHA2_MASK) #define ID_AA64ISAR0_SHA2_NONE (0x0 << ID_AA64ISAR0_SHA2_SHIFT) #define ID_AA64ISAR0_SHA2_BASE (0x1 << ID_AA64ISAR0_SHA2_SHIFT) #define ID_AA64ISAR0_CRC32_SHIFT 16 #define ID_AA64ISAR0_CRC32_MASK (0xf << ID_AA64ISAR0_CRC32_SHIFT) 
#define ID_AA64ISAR0_CRC32(x) ((x) & ID_AA64ISAR0_CRC32_MASK) #define ID_AA64ISAR0_CRC32_NONE (0x0 << ID_AA64ISAR0_CRC32_SHIFT) #define ID_AA64ISAR0_CRC32_BASE (0x1 << ID_AA64ISAR0_CRC32_SHIFT) /* ID_AA64MMFR0_EL1 */ #define ID_AA64MMFR0_MASK 0xffffffff #define ID_AA64MMFR0_PA_RANGE_SHIFT 0 #define ID_AA64MMFR0_PA_RANGE_MASK (0xf << ID_AA64MMFR0_PA_RANGE_SHIFT) #define ID_AA64MMFR0_PA_RANGE(x) ((x) & ID_AA64MMFR0_PA_RANGE_MASK) #define ID_AA64MMFR0_PA_RANGE_4G (0x0 << ID_AA64MMFR0_PA_RANGE_SHIFT) #define ID_AA64MMFR0_PA_RANGE_64G (0x1 << ID_AA64MMFR0_PA_RANGE_SHIFT) #define ID_AA64MMFR0_PA_RANGE_1T (0x2 << ID_AA64MMFR0_PA_RANGE_SHIFT) #define ID_AA64MMFR0_PA_RANGE_4T (0x3 << ID_AA64MMFR0_PA_RANGE_SHIFT) #define ID_AA64MMFR0_PA_RANGE_16T (0x4 << ID_AA64MMFR0_PA_RANGE_SHIFT) #define ID_AA64MMFR0_PA_RANGE_256T (0x5 << ID_AA64MMFR0_PA_RANGE_SHIFT) #define ID_AA64MMFR0_ASID_BITS_SHIFT 4 #define ID_AA64MMFR0_ASID_BITS_MASK (0xf << ID_AA64MMFR0_ASID_BITS_SHIFT) #define ID_AA64MMFR0_ASID_BITS(x) ((x) & ID_AA64MMFR0_ASID_BITS_MASK) #define ID_AA64MMFR0_ASID_BITS_8 (0x0 << ID_AA64MMFR0_ASID_BITS_SHIFT) #define ID_AA64MMFR0_ASID_BITS_16 (0x2 << ID_AA64MMFR0_ASID_BITS_SHIFT) #define ID_AA64MMFR0_BIGEND_SHIFT 8 #define ID_AA64MMFR0_BIGEND_MASK (0xf << ID_AA64MMFR0_BIGEND_SHIFT) #define ID_AA64MMFR0_BIGEND(x) ((x) & ID_AA64MMFR0_BIGEND_MASK) #define ID_AA64MMFR0_BIGEND_FIXED (0x0 << ID_AA64MMFR0_BIGEND_SHIFT) #define ID_AA64MMFR0_BIGEND_MIXED (0x1 << ID_AA64MMFR0_BIGEND_SHIFT) #define ID_AA64MMFR0_S_NS_MEM_SHIFT 12 #define ID_AA64MMFR0_S_NS_MEM_MASK (0xf << ID_AA64MMFR0_S_NS_MEM_SHIFT) #define ID_AA64MMFR0_S_NS_MEM(x) ((x) & ID_AA64MMFR0_S_NS_MEM_MASK) #define ID_AA64MMFR0_S_NS_MEM_NONE (0x0 << ID_AA64MMFR0_S_NS_MEM_SHIFT) #define ID_AA64MMFR0_S_NS_MEM_DISTINCT (0x1 << ID_AA64MMFR0_S_NS_MEM_SHIFT) #define ID_AA64MMFR0_BIGEND_EL0_SHIFT 16 #define ID_AA64MMFR0_BIGEND_EL0_MASK (0xf << ID_AA64MMFR0_BIGEND_EL0_SHIFT) #define ID_AA64MMFR0_BIGEND_EL0(x) ((x) & ID_AA64MMFR0_BIGEND_EL0_MASK) #define ID_AA64MMFR0_BIGEND_EL0_FIXED (0x0 << ID_AA64MMFR0_BIGEND_EL0_SHIFT) #define ID_AA64MMFR0_BIGEND_EL0_MIXED (0x1 << ID_AA64MMFR0_BIGEND_EL0_SHIFT) #define ID_AA64MMFR0_TGRAN16_SHIFT 20 #define ID_AA64MMFR0_TGRAN16_MASK (0xf << ID_AA64MMFR0_TGRAN16_SHIFT) #define ID_AA64MMFR0_TGRAN16(x) ((x) & ID_AA64MMFR0_TGRAN16_MASK) #define ID_AA64MMFR0_TGRAN16_NONE (0x0 << ID_AA64MMFR0_TGRAN16_SHIFT) #define ID_AA64MMFR0_TGRAN16_IMPL (0x1 << ID_AA64MMFR0_TGRAN16_SHIFT) #define ID_AA64MMFR0_TGRAN64_SHIFT 24 #define ID_AA64MMFR0_TGRAN64_MASK (0xf << ID_AA64MMFR0_TGRAN64_SHIFT) #define ID_AA64MMFR0_TGRAN64(x) ((x) & ID_AA64MMFR0_TGRAN64_MASK) #define ID_AA64MMFR0_TGRAN64_IMPL (0x0 << ID_AA64MMFR0_TGRAN64_SHIFT) #define ID_AA64MMFR0_TGRAN64_NONE (0xf << ID_AA64MMFR0_TGRAN64_SHIFT) #define ID_AA64MMFR0_TGRAN4_SHIFT 28 #define ID_AA64MMFR0_TGRAN4_MASK (0xf << ID_AA64MMFR0_TGRAN4_SHIFT) #define ID_AA64MMFR0_TGRAN4(x) ((x) & ID_AA64MMFR0_TGRAN4_MASK) #define ID_AA64MMFR0_TGRAN4_IMPL (0x0 << ID_AA64MMFR0_TGRAN4_SHIFT) #define ID_AA64MMFR0_TGRAN4_NONE (0xf << ID_AA64MMFR0_TGRAN4_SHIFT) /* ID_AA64PFR0_EL1 */ #define ID_AA64PFR0_MASK 0x0fffffff #define ID_AA64PFR0_EL0_SHIFT 0 #define ID_AA64PFR0_EL0_MASK (0xf << ID_AA64PFR0_EL0_SHIFT) #define ID_AA64PFR0_EL0(x) ((x) & ID_AA64PFR0_EL0_MASK) #define ID_AA64PFR0_EL0_64 (1 << ID_AA64PFR0_EL0_SHIFT) #define ID_AA64PFR0_EL0_64_32 (2 << ID_AA64PFR0_EL0_SHIFT) #define ID_AA64PFR0_EL1_SHIFT 4 #define ID_AA64PFR0_EL1_MASK (0xf << ID_AA64PFR0_EL1_SHIFT) #define ID_AA64PFR0_EL1(x) ((x) & 
ID_AA64PFR0_EL1_MASK) #define ID_AA64PFR0_EL1_64 (1 << ID_AA64PFR0_EL1_SHIFT) #define ID_AA64PFR0_EL1_64_32 (2 << ID_AA64PFR0_EL1_SHIFT) #define ID_AA64PFR0_EL2_SHIFT 8 #define ID_AA64PFR0_EL2_MASK (0xf << ID_AA64PFR0_EL2_SHIFT) #define ID_AA64PFR0_EL2(x) ((x) & ID_AA64PFR0_EL2_MASK) #define ID_AA64PFR0_EL2_NONE (0 << ID_AA64PFR0_EL2_SHIFT) #define ID_AA64PFR0_EL2_64 (1 << ID_AA64PFR0_EL2_SHIFT) #define ID_AA64PFR0_EL2_64_32 (2 << ID_AA64PFR0_EL2_SHIFT) #define ID_AA64PFR0_EL3_SHIFT 12 #define ID_AA64PFR0_EL3_MASK (0xf << ID_AA64PFR0_EL3_SHIFT) #define ID_AA64PFR0_EL3(x) ((x) & ID_AA64PFR0_EL3_MASK) #define ID_AA64PFR0_EL3_NONE (0 << ID_AA64PFR0_EL3_SHIFT) #define ID_AA64PFR0_EL3_64 (1 << ID_AA64PFR0_EL3_SHIFT) #define ID_AA64PFR0_EL3_64_32 (2 << ID_AA64PFR0_EL3_SHIFT) #define ID_AA64PFR0_FP_SHIFT 16 #define ID_AA64PFR0_FP_MASK (0xf << ID_AA64PFR0_FP_SHIFT) #define ID_AA64PFR0_FP(x) ((x) & ID_AA64PFR0_FP_MASK) #define ID_AA64PFR0_FP_IMPL (0x0 << ID_AA64PFR0_FP_SHIFT) #define ID_AA64PFR0_FP_NONE (0xf << ID_AA64PFR0_FP_SHIFT) #define ID_AA64PFR0_ADV_SIMD_SHIFT 20 #define ID_AA64PFR0_ADV_SIMD_MASK (0xf << ID_AA64PFR0_ADV_SIMD_SHIFT) #define ID_AA64PFR0_ADV_SIMD(x) ((x) & ID_AA64PFR0_ADV_SIMD_MASK) #define ID_AA64PFR0_ADV_SIMD_IMPL (0x0 << ID_AA64PFR0_ADV_SIMD_SHIFT) #define ID_AA64PFR0_ADV_SIMD_NONE (0xf << ID_AA64PFR0_ADV_SIMD_SHIFT) #define ID_AA64PFR0_GIC_BITS 0x4 /* Number of bits in GIC field */ #define ID_AA64PFR0_GIC_SHIFT 24 #define ID_AA64PFR0_GIC_MASK (0xf << ID_AA64PFR0_GIC_SHIFT) #define ID_AA64PFR0_GIC(x) ((x) & ID_AA64PFR0_GIC_MASK) #define ID_AA64PFR0_GIC_CPUIF_NONE (0x0 << ID_AA64PFR0_GIC_SHIFT) #define ID_AA64PFR0_GIC_CPUIF_EN (0x1 << ID_AA64PFR0_GIC_SHIFT) /* MAIR_EL1 - Memory Attribute Indirection Register */ #define MAIR_ATTR_MASK(idx) (0xff << ((n)* 8)) #define MAIR_ATTR(attr, idx) ((attr) << ((idx) * 8)) /* SCTLR_EL1 - System Control Register */ #define SCTLR_RES0 0xc8222400 /* Reserved, write 0 */ #define SCTLR_RES1 0x30d00800 /* Reserved, write 1 */ #define SCTLR_M 0x00000001 #define SCTLR_A 0x00000002 #define SCTLR_C 0x00000004 #define SCTLR_SA 0x00000008 #define SCTLR_SA0 0x00000010 #define SCTLR_CP15BEN 0x00000020 #define SCTLR_THEE 0x00000040 #define SCTLR_ITD 0x00000080 #define SCTLR_SED 0x00000100 #define SCTLR_UMA 0x00000200 #define SCTLR_I 0x00001000 #define SCTLR_DZE 0x00004000 #define SCTLR_UCT 0x00008000 #define SCTLR_nTWI 0x00010000 #define SCTLR_nTWE 0x00040000 #define SCTLR_WXN 0x00080000 #define SCTLR_EOE 0x01000000 #define SCTLR_EE 0x02000000 #define SCTLR_UCI 0x04000000 /* SPSR_EL1 */ /* * When the exception is taken in AArch64: * M[4] is 0 for AArch64 mode * M[3:2] is the exception level * M[1] is unused * M[0] is the SP select: * 0: always SP0 * 1: current ELs SP */ #define PSR_M_EL0t 0x00000000 #define PSR_M_EL1t 0x00000004 #define PSR_M_EL1h 0x00000005 #define PSR_M_EL2t 0x00000008 #define PSR_M_EL2h 0x00000009 #define PSR_M_MASK 0x0000001f #define PSR_F 0x00000040 #define PSR_I 0x00000080 #define PSR_A 0x00000100 #define PSR_D 0x00000200 #define PSR_IL 0x00100000 #define PSR_SS 0x00200000 #define PSR_V 0x10000000 #define PSR_C 0x20000000 #define PSR_Z 0x40000000 #define PSR_N 0x80000000 /* TCR_EL1 - Translation Control Register */ #define TCR_ASID_16 (1 << 36) #define TCR_IPS_SHIFT 32 #define TCR_IPS_32BIT (0 << TCR_IPS_SHIFT) #define TCR_IPS_36BIT (1 << TCR_IPS_SHIFT) #define TCR_IPS_40BIT (2 << TCR_IPS_SHIFT) #define TCR_IPS_42BIT (3 << TCR_IPS_SHIFT) #define TCR_IPS_44BIT (4 << TCR_IPS_SHIFT) #define TCR_IPS_48BIT (5 << TCR_IPS_SHIFT) #define 
TCR_TG1_SHIFT 30 #define TCR_TG1_16K (1 << TCR_TG1_SHIFT) #define TCR_TG1_4K (2 << TCR_TG1_SHIFT) #define TCR_TG1_64K (3 << TCR_TG1_SHIFT) #define TCR_SH1_SHIFT 28 #define TCR_SH1_IS (0x3UL << TCR_SH1_SHIFT) #define TCR_ORGN1_SHIFT 26 #define TCR_ORGN1_WBWA (0x1UL << TCR_ORGN1_SHIFT) #define TCR_IRGN1_SHIFT 24 #define TCR_IRGN1_WBWA (0x1UL << TCR_IRGN1_SHIFT) #define TCR_SH0_SHIFT 12 #define TCR_SH0_IS (0x3UL << TCR_SH0_SHIFT) #define TCR_ORGN0_SHIFT 10 #define TCR_ORGN0_WBWA (0x1UL << TCR_ORGN0_SHIFT) #define TCR_IRGN0_SHIFT 8 #define TCR_IRGN0_WBWA (0x1UL << TCR_IRGN0_SHIFT) #define TCR_CACHE_ATTRS ((TCR_IRGN0_WBWA | TCR_IRGN1_WBWA) |\ (TCR_ORGN0_WBWA | TCR_ORGN1_WBWA)) #ifdef SMP #define TCR_SMP_ATTRS (TCR_SH0_IS | TCR_SH1_IS) #else #define TCR_SMP_ATTRS 0 #endif #define TCR_T1SZ_SHIFT 16 #define TCR_T0SZ_SHIFT 0 #define TCR_T1SZ(x) ((x) << TCR_T1SZ_SHIFT) #define TCR_T0SZ(x) ((x) << TCR_T0SZ_SHIFT) #define TCR_TxSZ(x) (TCR_T1SZ(x) | TCR_T0SZ(x)) /* Saved Program Status Register */ #define DBG_SPSR_SS (0x1 << 21) /* Monitor Debug System Control Register */ #define DBG_MDSCR_SS (0x1 << 0) #define DBG_MDSCR_KDE (0x1 << 13) #define DBG_MDSCR_MDE (0x1 << 15) /* Perfomance Monitoring Counters */ #define PMCR_E (1 << 0) /* Enable all counters */ #define PMCR_P (1 << 1) /* Reset all counters */ #define PMCR_C (1 << 2) /* Clock counter reset */ #define PMCR_D (1 << 3) /* CNTR counts every 64 clk cycles */ #define PMCR_X (1 << 4) /* Export to ext. monitoring (ETM) */ #define PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ #define PMCR_LC (1 << 6) /* Long cycle count enable */ #define PMCR_IMP_SHIFT 24 /* Implementer code */ #define PMCR_IMP_MASK (0xff << PMCR_IMP_SHIFT) #define PMCR_IDCODE_SHIFT 16 /* Identification code */ #define PMCR_IDCODE_MASK (0xff << PMCR_IDCODE_SHIFT) #define PMCR_IDCODE_CORTEX_A57 0x01 #define PMCR_IDCODE_CORTEX_A72 0x02 #define PMCR_IDCODE_CORTEX_A53 0x03 #define PMCR_N_SHIFT 11 /* Number of counters implemented */ #define PMCR_N_MASK (0x1f << PMCR_N_SHIFT) #endif /* !_MACHINE_ARMREG_H_ */
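/*
 * For reference, the new ICC_SGI1R_EL1_* definitions above describe the
 * value that gic_v3_ipi_send() in gic_v3.c above writes to ICC_SGI1R_EL1:
 * a 16-bit bitmap of target Aff0 numbers in bits 15:0, the target
 * cluster's Aff1/Aff2/Aff3 in bits 23:16, 39:32 and 55:48, and the SGI
 * number in bits 27:24, with ICC_SGI1R_EL1_IRM instead requesting
 * broadcast delivery to all other CPUs.  A hedged sketch with illustrative
 * locals (tlist, aff, ipi):
 *
 *	val  = tlist & ICC_SGI1R_EL1_TL_MASK;
 *	val |= (uint64_t)CPU_AFF3(aff) << ICC_SGI1R_EL1_AFF3_SHIFT;
 *	val |= (uint64_t)CPU_AFF2(aff) << ICC_SGI1R_EL1_AFF2_SHIFT;
 *	val |= (uint64_t)CPU_AFF1(aff) << ICC_SGI1R_EL1_AFF1_SHIFT;
 *	val |= (uint64_t)(ipi & ICC_SGI1R_EL1_SGIID_MASK) <<
 *	    ICC_SGI1R_EL1_SGIID_SHIFT;
 *	gic_icc_write(SGI1R, val);
 */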