Index: head/sys/arm/allwinner/a20/a20_mp.c =================================================================== --- head/sys/arm/allwinner/a20/a20_mp.c (revision 296099) +++ head/sys/arm/allwinner/a20/a20_mp.c (revision 296100) @@ -1,145 +1,138 @@ /*- * Copyright (c) 2014 Ganbold Tsagaankhuu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #define CPUCFG_BASE 0x01c25c00 #define CPUCFG_SIZE 0x400 #define CPU0_RST_CTL 0x40 #define CPU0_CTL 0x44 #define CPU0_STATUS 0x48 #define CPU1_RST_CTL 0x80 #define CPU1_CTL 0x84 #define CPU1_STATUS 0x88 #define CPUCFG_GENCTL 0x184 #define CPUCFG_P_REG0 0x1a4 #define CPU1_PWR_CLAMP 0x1b0 #define CPU1_PWROFF_REG 0x1b4 #define CPUCFG_DBGCTL0 0x1e0 #define CPUCFG_DBGCTL1 0x1e4 void -platform_mp_init_secondary(void) -{ - - intr_pic_init_secondary(); -} - -void platform_mp_setmaxid(void) { int ncpu; if (mp_ncpus != 0) return; /* Read the number of cores from the CP15 L2 Control Register. */ __asm __volatile("mrc p15, 1, %0, c9, c0, 2" : "=r" (ncpu)); ncpu = ((ncpu >> 24) & 0x3) + 1; mp_ncpus = ncpu; mp_maxid = ncpu - 1; } void platform_mp_start_ap(void) { bus_space_handle_t cpucfg; uint32_t val; if (bus_space_map(fdtbus_bs_tag, CPUCFG_BASE, CPUCFG_SIZE, 0, &cpucfg) != 0) panic("Couldn't map the CPUCFG\n"); dcache_wbinv_poc_all(); bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_P_REG0, pmap_kextract((vm_offset_t)mpentry)); /* * Assert nCOREPORESET low and set L1RSTDISABLE low. * Ensure DBGPWRDUP is set to LOW to prevent any external * debug access to the processor. 
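* The power-up sequence that follows steps CPU1_PWR_CLAMP from 0xff down to 0x00 to release the power clamp gradually, clears the power-off gate, and only then de-asserts the core reset and re-asserts DBGPWRDUP.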
*/ bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU1_RST_CTL, 0); /* Set L1RSTDISABLE low */ val = bus_space_read_4(fdtbus_bs_tag, cpucfg, CPUCFG_GENCTL); val &= ~(1 << 1); bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_GENCTL, val); /* Set DBGPWRDUP low */ val = bus_space_read_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1); val &= ~(1 << 1); bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1, val); /* Release power clamp */ bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU1_PWR_CLAMP, 0xff); bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU1_PWR_CLAMP, 0x7f); bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU1_PWR_CLAMP, 0x3f); bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU1_PWR_CLAMP, 0x1f); bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU1_PWR_CLAMP, 0x0f); bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU1_PWR_CLAMP, 0x07); bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU1_PWR_CLAMP, 0x03); bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU1_PWR_CLAMP, 0x01); bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU1_PWR_CLAMP, 0x00); DELAY(10000); /* Clear power-off gating */ val = bus_space_read_4(fdtbus_bs_tag, cpucfg, CPU1_PWROFF_REG); val &= ~(1 << 0); bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU1_PWROFF_REG, val); DELAY(1000); /* De-assert cpu core reset */ bus_space_write_4(fdtbus_bs_tag, cpucfg, CPU1_RST_CTL, 3); /* Assert DBGPWRDUP signal */ val = bus_space_read_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1); val |= (1 << 1); bus_space_write_4(fdtbus_bs_tag, cpucfg, CPUCFG_DBGCTL1, val); armv7_sev(); bus_space_unmap(fdtbus_bs_tag, cpucfg, CPUCFG_SIZE); } Index: head/sys/arm/altera/socfpga/socfpga_mp.c =================================================================== --- head/sys/arm/altera/socfpga/socfpga_mp.c (revision 296099) +++ head/sys/arm/altera/socfpga/socfpga_mp.c (revision 296100) @@ -1,166 +1,159 @@ /*- * Copyright (c) 2014 Ruslan Bukin * All rights reserved. * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) * ("CTSRD"), as part of the DARPA CRASH research programme. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #define SCU_PHYSBASE 0xFFFEC000 #define SCU_SIZE 0x100 #define SCU_CONTROL_REG 0x00 #define SCU_CONTROL_ENABLE (1 << 0) #define SCU_CONFIG_REG 0x04 #define SCU_CONFIG_REG_NCPU_MASK 0x03 #define SCU_CPUPOWER_REG 0x08 #define SCU_INV_TAGS_REG 0x0c #define SCU_DIAG_CONTROL 0x30 #define SCU_DIAG_DISABLE_MIGBIT (1 << 0) #define SCU_FILTER_START_REG 0x40 #define SCU_FILTER_END_REG 0x44 #define SCU_SECURE_ACCESS_REG 0x50 #define SCU_NONSECURE_ACCESS_REG 0x54 #define RSTMGR_PHYSBASE 0xFFD05000 #define RSTMGR_SIZE 0x100 #define MPUMODRST 0x10 #define MPUMODRST_CPU1 (1 << 1) #define RAM_PHYSBASE 0x0 #define RAM_SIZE 0x1000 extern char *mpentry_addr; static void socfpga_trampoline(void); static void socfpga_trampoline(void) { __asm __volatile( "ldr pc, 1f\n" ".globl mpentry_addr\n" "mpentry_addr:\n" "1: .space 4\n"); } void -platform_mp_init_secondary(void) -{ - - intr_pic_init_secondary(); -} - -void platform_mp_setmaxid(void) { int hwcpu, ncpu; /* If we've already set this don't bother to do it again. */ if (mp_ncpus != 0) return; hwcpu = 2; ncpu = hwcpu; TUNABLE_INT_FETCH("hw.ncpu", &ncpu); if (ncpu < 1 || ncpu > hwcpu) ncpu = hwcpu; mp_ncpus = ncpu; mp_maxid = ncpu - 1; } void platform_mp_start_ap(void) { bus_space_handle_t scu, rst, ram; int reg; if (bus_space_map(fdtbus_bs_tag, SCU_PHYSBASE, SCU_SIZE, 0, &scu) != 0) panic("Couldn't map the SCU\n"); if (bus_space_map(fdtbus_bs_tag, RSTMGR_PHYSBASE, RSTMGR_SIZE, 0, &rst) != 0) panic("Couldn't map the reset manager (RSTMGR)\n"); if (bus_space_map(fdtbus_bs_tag, RAM_PHYSBASE, RAM_SIZE, 0, &ram) != 0) panic("Couldn't map the first physram page\n"); /* Invalidate SCU cache tags */ bus_space_write_4(fdtbus_bs_tag, scu, SCU_INV_TAGS_REG, 0x0000ffff); /* * Erratum ARM/MP: 764369 (problems with cache maintenance). * Setting the "disable-migratory bit" in the undocumented SCU * Diagnostic Control Register helps work around the problem. */ reg = bus_space_read_4(fdtbus_bs_tag, scu, SCU_DIAG_CONTROL); reg |= (SCU_DIAG_DISABLE_MIGBIT); bus_space_write_4(fdtbus_bs_tag, scu, SCU_DIAG_CONTROL, reg); /* Put CPU1 to reset state */ bus_space_write_4(fdtbus_bs_tag, rst, MPUMODRST, MPUMODRST_CPU1); /* Enable the SCU, then clean the cache on this core */ reg = bus_space_read_4(fdtbus_bs_tag, scu, SCU_CONTROL_REG); reg |= (SCU_CONTROL_ENABLE); bus_space_write_4(fdtbus_bs_tag, scu, SCU_CONTROL_REG, reg); /* Set up trampoline code */ mpentry_addr = (char *)pmap_kextract((vm_offset_t)mpentry); bus_space_write_region_4(fdtbus_bs_tag, ram, 0, (uint32_t *)&socfpga_trampoline, 8); dcache_wbinv_poc_all(); /* Put CPU1 out from reset */ bus_space_write_4(fdtbus_bs_tag, rst, MPUMODRST, 0); armv7_sev(); bus_space_unmap(fdtbus_bs_tag, scu, SCU_SIZE); bus_space_unmap(fdtbus_bs_tag, rst, RSTMGR_SIZE); bus_space_unmap(fdtbus_bs_tag, ram, RAM_SIZE); } Index: head/sys/arm/amlogic/aml8726/aml8726_mp.c =================================================================== --- head/sys/arm/amlogic/aml8726/aml8726_mp.c (revision 296099) +++ head/sys/arm/amlogic/aml8726/aml8726_mp.c (revision 296100) @@ -1,642 +1,627 @@ /*- * Copyright 2015 John Wehle * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Amlogic aml8726 multiprocessor support. * * Some processors require powering on which involves poking registers * on the aobus and cbus ... it's expected that these locations are set * in stone. * * Locking is not used as these routines should only be called by the BP * during startup and should complete prior to the APs being released (the * issue being to ensure that a register such as AML_SOC_CPU_CLK_CNTL_REG * is not concurrently modified). */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static const char *scu_compatible[] = { "arm,cortex-a5-scu", "arm,cortex-a9-scu", NULL }; static const char *scu_errata_764369[] = { "arm,cortex-a9-scu", NULL }; static const char *cpucfg_compatible[] = { "amlogic,aml8726-cpuconfig", NULL }; static struct { boolean_t errata_764369; u_long scu_size; struct resource scu_res; u_long cpucfg_size; struct resource cpucfg_res; struct resource aobus_res; struct resource cbus_res; } aml8726_smp; #define AML_SCU_CONTROL_REG 0 #define AML_SCU_CONTROL_ENABLE 1 #define AML_SCU_CONFIG_REG 4 #define AML_SCU_CONFIG_NCPU_MASK 0x3 #define AML_SCU_CPU_PWR_STATUS_REG 8 #define AML_SCU_CPU_PWR_STATUS_CPU3_MASK (3 << 24) #define AML_SCU_CPU_PWR_STATUS_CPU2_MASK (3 << 16) #define AML_SCU_CPU_PWR_STATUS_CPU1_MASK (3 << 8) #define AML_SCU_CPU_PWR_STATUS_CPU0_MASK 3 #define AML_SCU_INV_TAGS_REG 12 #define AML_SCU_DIAG_CONTROL_REG 48 #define AML_SCU_DIAG_CONTROL_DISABLE_MIGBIT 1 #define AML_CPUCONF_CONTROL_REG 0 #define AML_CPUCONF_CPU1_ADDR_REG 4 #define AML_CPUCONF_CPU2_ADDR_REG 8 #define AML_CPUCONF_CPU3_ADDR_REG 12 /* aobus */ #define AML_M8_CPU_PWR_CNTL0_REG 0xe0 #define AML_M8B_CPU_PWR_CNTL0_MODE_CPU3_MASK (3 << 22) #define AML_M8B_CPU_PWR_CNTL0_MODE_CPU2_MASK (3 << 20) #define AML_M8B_CPU_PWR_CNTL0_MODE_CPU1_MASK (3 << 18) #define AML_M8_CPU_PWR_CNTL0_ISO_CPU3 (1 << 3) #define AML_M8_CPU_PWR_CNTL0_ISO_CPU2 (1 << 2) #define AML_M8_CPU_PWR_CNTL0_ISO_CPU1 (1 << 1) #define AML_M8_CPU_PWR_CNTL1_REG 0xe4 #define AML_M8B_CPU_PWR_CNTL1_PWR_CPU3 (1 << 19) #define AML_M8B_CPU_PWR_CNTL1_PWR_CPU2 (1 << 18) #define AML_M8B_CPU_PWR_CNTL1_PWR_CPU1 (1 << 17) #define AML_M8_CPU_PWR_CNTL1_MODE_CPU3_MASK (3 << 8) #define AML_M8_CPU_PWR_CNTL1_MODE_CPU2_MASK (3 << 6) #define AML_M8_CPU_PWR_CNTL1_MODE_CPU1_MASK (3 << 4) #define 
AML_M8B_CPU_PWR_MEM_PD0_REG 0xf4 #define AML_M8B_CPU_PWR_MEM_PD0_CPU3 (0xf << 20) #define AML_M8B_CPU_PWR_MEM_PD0_CPU2 (0xf << 24) #define AML_M8B_CPU_PWR_MEM_PD0_CPU1 (0xf << 28) /* cbus */ #define AML_SOC_CPU_CLK_CNTL_REG 0x419c #define AML_M8_CPU_CLK_CNTL_RESET_CPU3 (1 << 27) #define AML_M8_CPU_CLK_CNTL_RESET_CPU2 (1 << 26) #define AML_M8_CPU_CLK_CNTL_RESET_CPU1 (1 << 25) #define SCU_WRITE_4(reg, value) bus_write_4(&aml8726_smp.scu_res, \ (reg), (value)) #define SCU_READ_4(reg) bus_read_4(&aml8726_smp.scu_res, (reg)) #define SCU_BARRIER(reg) bus_barrier(&aml8726_smp.scu_res, \ (reg), 4, (BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)) #define CPUCONF_WRITE_4(reg, value) bus_write_4(&aml8726_smp.cpucfg_res, \ (reg), (value)) #define CPUCONF_READ_4(reg) bus_read_4(&aml8726_smp.cpucfg_res, \ (reg)) #define CPUCONF_BARRIER(reg) bus_barrier(&aml8726_smp.cpucfg_res, \ (reg), 4, (BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)) #define AOBUS_WRITE_4(reg, value) bus_write_4(&aml8726_smp.aobus_res, \ (reg), (value)) #define AOBUS_READ_4(reg) bus_read_4(&aml8726_smp.aobus_res, \ (reg)) #define AOBUS_BARRIER(reg) bus_barrier(&aml8726_smp.aobus_res, \ (reg), 4, (BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)) #define CBUS_WRITE_4(reg, value) bus_write_4(&aml8726_smp.cbus_res, \ (reg), (value)) #define CBUS_READ_4(reg) bus_read_4(&aml8726_smp.cbus_res, \ (reg)) #define CBUS_BARRIER(reg) bus_barrier(&aml8726_smp.cbus_res, \ (reg), 4, (BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)) static phandle_t find_node_for_device(const char *device, const char **compatible) { int i; phandle_t node; /* * Try to access the node directly i.e. through /aliases/. */ if ((node = OF_finddevice(device)) != 0) for (i = 0; compatible[i]; i++) if (fdt_is_compatible_strict(node, compatible[i])) return node; /* * Find the node the long way. */ for (i = 0; compatible[i]; i++) { if ((node = OF_finddevice("/soc")) == 0) return (0); if ((node = fdt_find_compatible(node, compatible[i], 1)) != 0) return node; } return (0); } static int alloc_resource_for_node(phandle_t node, struct resource *res, u_long *size) { int err; u_long pbase, psize; u_long start; if ((err = fdt_get_range(OF_parent(node), 0, &pbase, &psize)) != 0 || (err = fdt_regsize(node, &start, size)) != 0) return (err); start += pbase; memset(res, 0, sizeof(*res)); res->r_bustag = fdtbus_bs_tag; err = bus_space_map(res->r_bustag, start, *size, 0, &res->r_bushandle); return (err); } static void power_on_cpu(int cpu) { uint32_t scpsr; uint32_t value; if (cpu <= 0) return; /* * Power on the CPU if the intricate details are known, otherwise * just hope for the best (it may have already be powered on by * the hardware / firmware). */ switch (aml8726_soc_hw_rev) { case AML_SOC_HW_REV_M8: case AML_SOC_HW_REV_M8B: /* * Set the SCU power status for the CPU to normal mode. */ scpsr = SCU_READ_4(AML_SCU_CPU_PWR_STATUS_REG); scpsr &= ~(AML_SCU_CPU_PWR_STATUS_CPU1_MASK << ((cpu - 1) * 8)); SCU_WRITE_4(AML_SCU_CPU_PWR_STATUS_REG, scpsr); SCU_BARRIER(AML_SCU_CPU_PWR_STATUS_REG); if (aml8726_soc_hw_rev == AML_SOC_HW_REV_M8B) { /* * Reset may cause the current power status from the * actual CPU to be written to the SCU (over-writing * the value we've just written) so set it to normal * mode as well. */ value = AOBUS_READ_4(AML_M8_CPU_PWR_CNTL0_REG); value &= ~(AML_M8B_CPU_PWR_CNTL0_MODE_CPU1_MASK << ((cpu - 1) * 2)); AOBUS_WRITE_4(AML_M8_CPU_PWR_CNTL0_REG, value); AOBUS_BARRIER(AML_M8_CPU_PWR_CNTL0_REG); } DELAY(5); /* * Assert reset. 
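* The core is held in reset while power is applied (and, on the M8B, the RAM pull-down is released); the reset bit is cleared again only after the peripheral clamp has been released.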
*/ value = CBUS_READ_4(AML_SOC_CPU_CLK_CNTL_REG); value |= AML_M8_CPU_CLK_CNTL_RESET_CPU1 << (cpu - 1); CBUS_WRITE_4(AML_SOC_CPU_CLK_CNTL_REG, value); CBUS_BARRIER(AML_SOC_CPU_CLK_CNTL_REG); if (aml8726_soc_hw_rev == AML_SOC_HW_REV_M8B) { /* * Release RAM pull-down. */ value = AOBUS_READ_4(AML_M8B_CPU_PWR_MEM_PD0_REG); value &= ~((uint32_t)AML_M8B_CPU_PWR_MEM_PD0_CPU1 >> ((cpu - 1) * 4)); AOBUS_WRITE_4(AML_M8B_CPU_PWR_MEM_PD0_REG, value); AOBUS_BARRIER(AML_M8B_CPU_PWR_MEM_PD0_REG); } /* * Power on CPU. */ value = AOBUS_READ_4(AML_M8_CPU_PWR_CNTL1_REG); value &= ~(AML_M8_CPU_PWR_CNTL1_MODE_CPU1_MASK << ((cpu - 1) * 2)); AOBUS_WRITE_4(AML_M8_CPU_PWR_CNTL1_REG, value); AOBUS_BARRIER(AML_M8_CPU_PWR_CNTL1_REG); DELAY(10); if (aml8726_soc_hw_rev == AML_SOC_HW_REV_M8B) { /* * Wait for power on confirmation. */ for ( ; ; ) { value = AOBUS_READ_4(AML_M8_CPU_PWR_CNTL1_REG); value &= AML_M8B_CPU_PWR_CNTL1_PWR_CPU1 << (cpu - 1); if (value) break; DELAY(10); } } /* * Release peripheral clamp. */ value = AOBUS_READ_4(AML_M8_CPU_PWR_CNTL0_REG); value &= ~(AML_M8_CPU_PWR_CNTL0_ISO_CPU1 << (cpu - 1)); AOBUS_WRITE_4(AML_M8_CPU_PWR_CNTL0_REG, value); AOBUS_BARRIER(AML_M8_CPU_PWR_CNTL0_REG); /* * Release reset. */ value = CBUS_READ_4(AML_SOC_CPU_CLK_CNTL_REG); value &= ~(AML_M8_CPU_CLK_CNTL_RESET_CPU1 << (cpu - 1)); CBUS_WRITE_4(AML_SOC_CPU_CLK_CNTL_REG, value); CBUS_BARRIER(AML_SOC_CPU_CLK_CNTL_REG); if (aml8726_soc_hw_rev == AML_SOC_HW_REV_M8B) { /* * The Amlogic Linux platform code sets the SCU power * status for the CPU again for some reason so we * follow suit (perhaps in case the reset caused * a stale power status from the actual CPU to be * written to the SCU). */ SCU_WRITE_4(AML_SCU_CPU_PWR_STATUS_REG, scpsr); SCU_BARRIER(AML_SCU_CPU_PWR_STATUS_REG); } break; default: break; } } - -void -platform_mp_init_secondary(void) -{ - - /* - * Consider modifying the timer driver to support - * per-cpu timers and then enabling the timer for - * each AP. - */ - - intr_pic_init_secondary(); -} - - void platform_mp_setmaxid(void) { int err; int i; int ncpu; phandle_t cpucfg_node; phandle_t scu_node; uint32_t value; if (mp_ncpus != 0) return; ncpu = 1; /* * Is the hardware necessary for SMP present? */ if ((scu_node = find_node_for_device("scu", scu_compatible)) == 0) goto moveon; if ((cpucfg_node = find_node_for_device("cpuconfig", cpucfg_compatible)) == 0) goto moveon; if (alloc_resource_for_node(scu_node, &aml8726_smp.scu_res, &aml8726_smp.scu_size) != 0) panic("Could not allocate resource for SCU"); if (alloc_resource_for_node(cpucfg_node, &aml8726_smp.cpucfg_res, &aml8726_smp.cpucfg_size) != 0) panic("Could not allocate resource for CPUCONFIG"); /* * Strictly speaking the aobus and cbus may not be required in * order to start an AP (it depends on the processor), however * always mapping them in simplifies the code. */ aml8726_smp.aobus_res.r_bustag = fdtbus_bs_tag; err = bus_space_map(aml8726_smp.aobus_res.r_bustag, AML_SOC_AOBUS_BASE_ADDR, 0x100000, 0, &aml8726_smp.aobus_res.r_bushandle); if (err) panic("Could not allocate resource for AOBUS"); aml8726_smp.cbus_res.r_bustag = fdtbus_bs_tag; err = bus_space_map(aml8726_smp.cbus_res.r_bustag, AML_SOC_CBUS_BASE_ADDR, 0x100000, 0, &aml8726_smp.cbus_res.r_bushandle); if (err) panic("Could not allocate resource for CBUS"); aml8726_smp.errata_764369 = false; for (i = 0; scu_errata_764369[i]; i++) if (fdt_is_compatible_strict(scu_node, scu_errata_764369[i])) { aml8726_smp.errata_764369 = true; break; } /* * Read the number of CPUs present. 
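* The low two bits of the SCU configuration register hold the number of cores minus one.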
*/ value = SCU_READ_4(AML_SCU_CONFIG_REG); ncpu = (value & AML_SCU_CONFIG_NCPU_MASK) + 1; moveon: mp_ncpus = ncpu; mp_maxid = ncpu - 1; } void platform_mp_start_ap(void) { int i; uint32_t reg; uint32_t value; vm_paddr_t paddr; if (mp_ncpus < 2) return; /* * Invalidate SCU cache tags. The 0x0000ffff constant invalidates * all ways on all cores 0-3. Per the ARM docs, it's harmless to * write to the bits for cores that are not present. */ SCU_WRITE_4(AML_SCU_INV_TAGS_REG, 0x0000ffff); if (aml8726_smp.errata_764369) { /* * Erratum ARM/MP: 764369 (problems with cache maintenance). * Setting the "disable-migratory bit" in the undocumented SCU * Diagnostic Control Register helps work around the problem. */ value = SCU_READ_4(AML_SCU_DIAG_CONTROL_REG); value |= AML_SCU_DIAG_CONTROL_DISABLE_MIGBIT; SCU_WRITE_4(AML_SCU_DIAG_CONTROL_REG, value); } /* * Enable the SCU, then clean the cache on this core. After these * two operations the cache tag ram in the SCU is coherent with * the contents of the cache on this core. The other cores aren't * running yet so their caches can't contain valid data yet, however * we've initialized their SCU tag ram above, so they will be * coherent from startup. */ value = SCU_READ_4(AML_SCU_CONTROL_REG); value |= AML_SCU_CONTROL_ENABLE; SCU_WRITE_4(AML_SCU_CONTROL_REG, value); SCU_BARRIER(AML_SCU_CONTROL_REG); dcache_wbinv_poc_all(); /* Set the boot address and power on each AP. */ paddr = pmap_kextract((vm_offset_t)mpentry); for (i = 1; i < mp_ncpus; i++) { reg = AML_CPUCONF_CPU1_ADDR_REG + ((i - 1) * 4); CPUCONF_WRITE_4(reg, paddr); CPUCONF_BARRIER(reg); power_on_cpu(i); } /* * Enable the APs. * * The Amlogic Linux platform code sets the lsb for some reason * in addition to the enable bit for each AP so we follow suit * (the lsb may be the enable bit for the BP, though in that case * it should already be set since it's currently running). */ value = CPUCONF_READ_4(AML_CPUCONF_CONTROL_REG); value |= 1; for (i = 1; i < mp_ncpus; i++) value |= (1 << i); CPUCONF_WRITE_4(AML_CPUCONF_CONTROL_REG, value); CPUCONF_BARRIER(AML_CPUCONF_CONTROL_REG); /* Wakeup the now enabled APs */ armv7_sev(); /* * Free the resources which are not needed after startup. */ bus_space_unmap(aml8726_smp.scu_res.r_bustag, aml8726_smp.scu_res.r_bushandle, aml8726_smp.scu_size); bus_space_unmap(aml8726_smp.cpucfg_res.r_bustag, aml8726_smp.cpucfg_res.r_bushandle, aml8726_smp.cpucfg_size); bus_space_unmap(aml8726_smp.aobus_res.r_bustag, aml8726_smp.aobus_res.r_bushandle, 0x100000); bus_space_unmap(aml8726_smp.cbus_res.r_bustag, aml8726_smp.cbus_res.r_bushandle, 0x100000); memset(&aml8726_smp, 0, sizeof(aml8726_smp)); } /* * Stub drivers for cosmetic purposes. 
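* They only claim the scu and cpuconfig nodes so the devices appear in the newbus tree; the actual SMP work is done by the routines above.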
*/ struct aml8726_scu_softc { device_t dev; }; static int aml8726_scu_probe(device_t dev) { int i; for (i = 0; scu_compatible[i]; i++) if (ofw_bus_is_compatible(dev, scu_compatible[i])) break; if (!scu_compatible[i]) return (ENXIO); device_set_desc(dev, "ARM Snoop Control Unit"); return (BUS_PROBE_DEFAULT); } static int aml8726_scu_attach(device_t dev) { struct aml8726_scu_softc *sc = device_get_softc(dev); sc->dev = dev; return (0); } static int aml8726_scu_detach(device_t dev) { return (0); } static device_method_t aml8726_scu_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aml8726_scu_probe), DEVMETHOD(device_attach, aml8726_scu_attach), DEVMETHOD(device_detach, aml8726_scu_detach), DEVMETHOD_END }; static driver_t aml8726_scu_driver = { "scu", aml8726_scu_methods, sizeof(struct aml8726_scu_softc), }; static devclass_t aml8726_scu_devclass; EARLY_DRIVER_MODULE(scu, simplebus, aml8726_scu_driver, aml8726_scu_devclass, 0, 0, BUS_PASS_CPU + BUS_PASS_ORDER_MIDDLE); struct aml8726_cpucfg_softc { device_t dev; }; static int aml8726_cpucfg_probe(device_t dev) { int i; for (i = 0; cpucfg_compatible[i]; i++) if (ofw_bus_is_compatible(dev, cpucfg_compatible[i])) break; if (!cpucfg_compatible[i]) return (ENXIO); device_set_desc(dev, "Amlogic CPU Config"); return (BUS_PROBE_DEFAULT); } static int aml8726_cpucfg_attach(device_t dev) { struct aml8726_cpucfg_softc *sc = device_get_softc(dev); sc->dev = dev; return (0); } static int aml8726_cpucfg_detach(device_t dev) { return (0); } static device_method_t aml8726_cpucfg_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aml8726_cpucfg_probe), DEVMETHOD(device_attach, aml8726_cpucfg_attach), DEVMETHOD(device_detach, aml8726_cpucfg_detach), DEVMETHOD_END }; static driver_t aml8726_cpucfg_driver = { "cpuconfig", aml8726_cpucfg_methods, sizeof(struct aml8726_cpucfg_softc), }; static devclass_t aml8726_cpucfg_devclass; EARLY_DRIVER_MODULE(cpuconfig, simplebus, aml8726_cpucfg_driver, aml8726_cpucfg_devclass, 0, 0, BUS_PASS_CPU + BUS_PASS_ORDER_MIDDLE); Index: head/sys/arm/annapurna/alpine/alpine_machdep_mp.c =================================================================== --- head/sys/arm/annapurna/alpine/alpine_machdep_mp.c (revision 296099) +++ head/sys/arm/annapurna/alpine/alpine_machdep_mp.c (revision 296100) @@ -1,320 +1,313 @@ /*- * Copyright (c) 2013 Ruslan Bukin * Copyright (c) 2015 Semihalf * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define AL_CPU_RESUME_WATERMARK_REG 0x00 #define AL_CPU_RESUME_FLAGS_REG 0x04 #define AL_CPU_RESUME_PCPU_RADDR_REG(cpu) (0x08 + 0x04 + 8*(cpu)) #define AL_CPU_RESUME_PCPU_FLAGS(cpu) (0x08 + 8*(cpu)) /* Per-CPU flags */ #define AL_CPU_RESUME_FLG_PERCPU_DONT_RESUME (1 << 2) /* The expected magic number for validating the resume addresses */ #define AL_CPU_RESUME_MAGIC_NUM 0xf0e1d200 #define AL_CPU_RESUME_MAGIC_NUM_MASK 0xffffff00 /* The expected minimal version number for validating the capabilities */ #define AL_CPU_RESUME_MIN_VER 0x000000c3 #define AL_CPU_RESUME_MIN_VER_MASK 0x000000ff /* Field controlling the boot-up of companion cores */ #define AL_NB_INIT_CONTROL (0x8) #define AL_NB_CONFIG_STATUS_PWR_CTRL(cpu) (0x2020 + (cpu)*0x100) #define SERDES_NUM_GROUPS 4 #define SERDES_GROUP_SIZE 0x400 extern bus_addr_t al_devmap_pa; extern bus_addr_t al_devmap_size; extern void mpentry(void); int alpine_serdes_resource_get(uint32_t group, bus_space_tag_t *tag, bus_addr_t *baddr); static int platform_mp_get_core_cnt(void); static int alpine_get_cpu_resume_base(u_long *pbase, u_long *psize); static int alpine_get_nb_base(u_long *pbase, u_long *psize); static int alpine_get_serdes_base(u_long *pbase, u_long *psize); int alpine_serdes_resource_get(uint32_t group, bus_space_tag_t *tag, bus_addr_t *baddr); static boolean_t alpine_validate_cpu(u_int, phandle_t, u_int, pcell_t *); static boolean_t alpine_validate_cpu(u_int id, phandle_t child, u_int addr_cell, pcell_t *reg) { return fdt_is_compatible(child, "arm,cortex-a15"); } static int platform_mp_get_core_cnt(void) { static int ncores = 0; int nchilds; uint32_t reg; /* Calculate ncores value only once */ if (ncores) return (ncores); reg = cp15_l2ctlr_get(); ncores = CPUV7_L2CTLR_NPROC(reg); nchilds = ofw_cpu_early_foreach(alpine_validate_cpu, false); /* Limit CPUs if DTS has configured less than available */ if ((nchilds > 0) && (nchilds < ncores)) { printf("SMP: limiting number of active CPUs to %d out of %d\n", nchilds, ncores); ncores = nchilds; } return (ncores); } void -platform_mp_init_secondary(void) -{ - - intr_pic_init_secondary(); -} - -void platform_mp_setmaxid(void) { mp_ncpus = platform_mp_get_core_cnt(); mp_maxid = mp_ncpus - 1; } static int alpine_get_cpu_resume_base(u_long *pbase, u_long *psize) { phandle_t node; u_long base = 0; u_long size = 0; if (pbase == NULL || psize == NULL) return (EINVAL); if ((node = OF_finddevice("/")) == -1) return (EFAULT); if ((node = ofw_bus_find_compatible(node, "annapurna-labs,al-cpu-resume")) == 0) return (EFAULT); if (fdt_regsize(node, &base, &size)) return (EFAULT); *pbase = base; *psize = size; return (0); } static int alpine_get_nb_base(u_long *pbase, u_long *psize) { phandle_t node; u_long base = 0; u_long size = 0; if (pbase == NULL || psize == NULL) return (EINVAL); if ((node = OF_finddevice("/")) == -1) 
return (EFAULT); if ((node = ofw_bus_find_compatible(node, "annapurna-labs,al-nb-service")) == 0) return (EFAULT); if (fdt_regsize(node, &base, &size)) return (EFAULT); *pbase = base; *psize = size; return (0); } void platform_mp_start_ap(void) { uint32_t physaddr; vm_offset_t vaddr; uint32_t val; uint32_t start_mask; u_long cpu_resume_base; u_long nb_base; u_long cpu_resume_size; u_long nb_size; bus_addr_t cpu_resume_baddr; bus_addr_t nb_baddr; int a; if (alpine_get_cpu_resume_base(&cpu_resume_base, &cpu_resume_size)) panic("Couldn't resolve cpu_resume_base address\n"); if (alpine_get_nb_base(&nb_base, &nb_size)) panic("Couldn't resolve_nb_base address\n"); /* Proceed with start addresses for additional CPUs */ if (bus_space_map(fdtbus_bs_tag, al_devmap_pa + cpu_resume_base, cpu_resume_size, 0, &cpu_resume_baddr)) panic("Couldn't map CPU-resume area"); if (bus_space_map(fdtbus_bs_tag, al_devmap_pa + nb_base, nb_size, 0, &nb_baddr)) panic("Couldn't map NB-service area"); /* Proceed with start addresses for additional CPUs */ val = bus_space_read_4(fdtbus_bs_tag, cpu_resume_baddr, AL_CPU_RESUME_WATERMARK_REG); if (((val & AL_CPU_RESUME_MAGIC_NUM_MASK) != AL_CPU_RESUME_MAGIC_NUM) || ((val & AL_CPU_RESUME_MIN_VER_MASK) < AL_CPU_RESUME_MIN_VER)) { panic("CPU-resume device is not compatible"); } vaddr = (vm_offset_t)mpentry; physaddr = pmap_kextract(vaddr); for (a = 1; a < platform_mp_get_core_cnt(); a++) { /* Power up the core */ bus_space_write_4(fdtbus_bs_tag, nb_baddr, AL_NB_CONFIG_STATUS_PWR_CTRL(a), 0); mb(); /* Enable resume */ val = bus_space_read_4(fdtbus_bs_tag, cpu_resume_baddr, AL_CPU_RESUME_PCPU_FLAGS(a)); val &= ~AL_CPU_RESUME_FLG_PERCPU_DONT_RESUME; bus_space_write_4(fdtbus_bs_tag, cpu_resume_baddr, AL_CPU_RESUME_PCPU_FLAGS(a), val); mb(); /* Set resume physical address */ bus_space_write_4(fdtbus_bs_tag, cpu_resume_baddr, AL_CPU_RESUME_PCPU_RADDR_REG(a), physaddr); mb(); } /* Release cores from reset */ if (bus_space_map(fdtbus_bs_tag, al_devmap_pa + nb_base, nb_size, 0, &nb_baddr)) panic("Couldn't map NB-service area"); start_mask = (1 << platform_mp_get_core_cnt()) - 1; /* Release cores from reset */ val = bus_space_read_4(fdtbus_bs_tag, nb_baddr, AL_NB_INIT_CONTROL); val |= start_mask; bus_space_write_4(fdtbus_bs_tag, nb_baddr, AL_NB_INIT_CONTROL, val); dsb(); bus_space_unmap(fdtbus_bs_tag, nb_baddr, nb_size); bus_space_unmap(fdtbus_bs_tag, cpu_resume_baddr, cpu_resume_size); } static int alpine_get_serdes_base(u_long *pbase, u_long *psize) { phandle_t node; u_long base = 0; u_long size = 0; if (pbase == NULL || psize == NULL) return (EINVAL); if ((node = OF_finddevice("/")) == -1) return (EFAULT); if ((node = ofw_bus_find_compatible(node, "annapurna-labs,al-serdes")) == 0) return (EFAULT); if (fdt_regsize(node, &base, &size)) return (EFAULT); *pbase = base; *psize = size; return (0); } int alpine_serdes_resource_get(uint32_t group, bus_space_tag_t *tag, bus_addr_t *baddr) { u_long serdes_base, serdes_size; int ret; static bus_addr_t baddr_mapped[SERDES_NUM_GROUPS]; if (group >= SERDES_NUM_GROUPS) return (EINVAL); if (baddr_mapped[group]) { *tag = fdtbus_bs_tag; *baddr = baddr_mapped[group]; return (0); } ret = alpine_get_serdes_base(&serdes_base, &serdes_size); if (ret) return (ret); ret = bus_space_map(fdtbus_bs_tag, al_devmap_pa + serdes_base + group * SERDES_GROUP_SIZE, (SERDES_NUM_GROUPS - group) * SERDES_GROUP_SIZE, 0, baddr); if (ret) return (ret); baddr_mapped[group] = *baddr; return (0); } Index: head/sys/arm/arm/mp_machdep.c 
=================================================================== --- head/sys/arm/arm/mp_machdep.c (revision 296099) +++ head/sys/arm/arm/mp_machdep.c (revision 296100) @@ -1,527 +1,527 @@ /*- * Copyright (c) 2011 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #ifdef CPU_MV_PJ4B #include #include #endif #include "opt_smp.h" extern struct pcpu __pcpu[]; /* used to hold the AP's until we are ready to release them */ struct mtx ap_boot_mtx; struct pcb stoppcbs[MAXCPU]; /* # of Applications processors */ volatile int mp_naps; /* Set to 1 once we're ready to let the APs out of the pen. 
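* APs spin in init_secondary() until release_aps() sets this and wakes them.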
*/ volatile int aps_ready = 0; #ifndef ARM_INTRNG static int ipi_handler(void *arg); #endif void set_stackptrs(int cpu); /* Temporary variables for init_secondary() */ void *dpcpu[MAXCPU - 1]; /* Determine if we running MP machine */ int cpu_mp_probe(void) { KASSERT(mp_ncpus != 0, ("cpu_mp_probe: mp_ncpus is unset")); CPU_SETOF(0, &all_cpus); return (mp_ncpus > 1); } /* Start Application Processor via platform specific function */ static int check_ap(void) { uint32_t ms; for (ms = 0; ms < 2000; ++ms) { if ((mp_naps + 1) == mp_ncpus) return (0); /* success */ else DELAY(1000); } return (-2); } extern unsigned char _end[]; /* Initialize and fire up non-boot processors */ void cpu_mp_start(void) { int error, i; mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); /* Reserve memory for application processors */ for(i = 0; i < (mp_ncpus - 1); i++) dpcpu[i] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO); dcache_wbinv_poc_all(); /* Initialize boot code and start up processors */ platform_mp_start_ap(); /* Check if ap's started properly */ error = check_ap(); if (error) printf("WARNING: Some AP's failed to start\n"); else for (i = 1; i < mp_ncpus; i++) CPU_SET(i, &all_cpus); } /* Introduce rest of cores to the world */ void cpu_mp_announce(void) { } extern vm_paddr_t pmap_pa; void init_secondary(int cpu) { struct pcpu *pc; uint32_t loop_counter; #ifndef ARM_INTRNG int start = 0, end = 0; #endif uint32_t actlr_mask, actlr_set; pmap_set_tex(); cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set); reinit_mmu(pmap_kern_ttb, actlr_mask, actlr_set); cpu_setup(); /* Provide stack pointers for other processor modes. */ set_stackptrs(cpu); enable_interrupts(PSR_A); pc = &__pcpu[cpu]; /* * pcpu_init() updates queue, so it should not be executed in parallel * on several cores */ while(mp_naps < (cpu - 1)) ; pcpu_init(pc, cpu, sizeof(struct pcpu)); dpcpu_init(dpcpu[cpu - 1], cpu); /* Signal our startup to BSP */ atomic_add_rel_32(&mp_naps, 1); /* Spin until the BSP releases the APs */ while (!atomic_load_acq_int(&aps_ready)) { #if __ARM_ARCH >= 7 __asm __volatile("wfe"); #endif } /* Initialize curthread */ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); pc->pc_curthread = pc->pc_idlethread; pc->pc_curpcb = pc->pc_idlethread->td_pcb; set_curthread(pc->pc_idlethread); #ifdef VFP vfp_init(); #endif mtx_lock_spin(&ap_boot_mtx); atomic_add_rel_32(&smp_cpus, 1); if (smp_cpus == mp_ncpus) { /* enable IPI's, tlb shootdown, freezes etc */ atomic_store_rel_int(&smp_started, 1); } mtx_unlock_spin(&ap_boot_mtx); #ifndef ARM_INTRNG /* Enable ipi */ #ifdef IPI_IRQ_START start = IPI_IRQ_START; #ifdef IPI_IRQ_END end = IPI_IRQ_END; #else end = IPI_IRQ_START; #endif #endif for (int i = start; i <= end; i++) arm_unmask_irq(i); #endif /* INTRNG */ enable_interrupts(PSR_I); loop_counter = 0; while (smp_started == 0) { DELAY(100); loop_counter++; if (loop_counter == 1000) CTR0(KTR_SMP, "AP still wait for smp_started"); } /* Start per-CPU event timers. */ cpu_initclocks_ap(); CTR0(KTR_SMP, "go into scheduler"); - platform_mp_init_secondary(); + intr_pic_init_secondary(); /* Enter the scheduler */ sched_throw(NULL); panic("scheduler returned us to %s", __func__); /* NOTREACHED */ } #ifdef ARM_INTRNG static void ipi_rendezvous(void *dummy __unused) { CTR0(KTR_SMP, "IPI_RENDEZVOUS"); smp_rendezvous_action(); } static void ipi_ast(void *dummy __unused) { CTR0(KTR_SMP, "IPI_AST"); } static void ipi_stop(void *dummy __unused) { u_int cpu; /* * IPI_STOP_HARD is mapped to IPI_STOP. 
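* Both are therefore serviced by this handler.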
*/ CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD"); cpu = PCPU_GET(cpuid); savectx(&stoppcbs[cpu]); /* * CPUs are stopped when entering the debugger and at * system shutdown, both events which can precede a * panic dump. For the dump to be correct, all caches * must be flushed and invalidated, but on ARM there's * no way to broadcast a wbinv_all to other cores. * Instead, we have each core do the local wbinv_all as * part of stopping the core. The core requesting the * stop will do the l2 cache flush after all other cores * have done their l1 flushes and stopped. */ dcache_wbinv_poc_all(); /* Indicate we are stopped */ CPU_SET_ATOMIC(cpu, &stopped_cpus); /* Wait for restart */ while (!CPU_ISSET(cpu, &started_cpus)) cpu_spinwait(); CPU_CLR_ATOMIC(cpu, &started_cpus); CPU_CLR_ATOMIC(cpu, &stopped_cpus); #ifdef DDB dbg_resume_dbreg(); #endif CTR0(KTR_SMP, "IPI_STOP (restart)"); } static void ipi_preempt(void *arg) { struct trapframe *oldframe; struct thread *td; critical_enter(); td = curthread; td->td_intr_nesting_level++; oldframe = td->td_intr_frame; td->td_intr_frame = (struct trapframe *)arg; CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(td); td->td_intr_frame = oldframe; td->td_intr_nesting_level--; critical_exit(); } static void ipi_hardclock(void *arg) { struct trapframe *oldframe; struct thread *td; critical_enter(); td = curthread; td->td_intr_nesting_level++; oldframe = td->td_intr_frame; td->td_intr_frame = (struct trapframe *)arg; CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); td->td_intr_frame = oldframe; td->td_intr_nesting_level--; critical_exit(); } #else static int ipi_handler(void *arg) { u_int cpu, ipi; cpu = PCPU_GET(cpuid); ipi = pic_ipi_read((int)arg); while ((ipi != 0x3ff)) { switch (ipi) { case IPI_RENDEZVOUS: CTR0(KTR_SMP, "IPI_RENDEZVOUS"); smp_rendezvous_action(); break; case IPI_AST: CTR0(KTR_SMP, "IPI_AST"); break; case IPI_STOP: /* * IPI_STOP_HARD is mapped to IPI_STOP so it is not * necessary to add it in the switch. */ CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD"); savectx(&stoppcbs[cpu]); /* * CPUs are stopped when entering the debugger and at * system shutdown, both events which can precede a * panic dump. For the dump to be correct, all caches * must be flushed and invalidated, but on ARM there's * no way to broadcast a wbinv_all to other cores. * Instead, we have each core do the local wbinv_all as * part of stopping the core. The core requesting the * stop will do the l2 cache flush after all other cores * have done their l1 flushes and stopped. 
*/ dcache_wbinv_poc_all(); /* Indicate we are stopped */ CPU_SET_ATOMIC(cpu, &stopped_cpus); /* Wait for restart */ while (!CPU_ISSET(cpu, &started_cpus)) cpu_spinwait(); CPU_CLR_ATOMIC(cpu, &started_cpus); CPU_CLR_ATOMIC(cpu, &stopped_cpus); #ifdef DDB dbg_resume_dbreg(); #endif CTR0(KTR_SMP, "IPI_STOP (restart)"); break; case IPI_PREEMPT: CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(curthread); break; case IPI_HARDCLOCK: CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); break; default: panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu); } pic_ipi_clear(ipi); ipi = pic_ipi_read(-1); } return (FILTER_HANDLED); } #endif static void release_aps(void *dummy __unused) { uint32_t loop_counter; #ifndef ARM_INTRNG int start = 0, end = 0; #endif if (mp_ncpus == 1) return; #ifdef ARM_INTRNG intr_ipi_set_handler(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL, 0); intr_ipi_set_handler(IPI_AST, "ast", ipi_ast, NULL, 0); intr_ipi_set_handler(IPI_STOP, "stop", ipi_stop, NULL, 0); intr_ipi_set_handler(IPI_PREEMPT, "preempt", ipi_preempt, NULL, 0); intr_ipi_set_handler(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL, 0); #else #ifdef IPI_IRQ_START start = IPI_IRQ_START; #ifdef IPI_IRQ_END end = IPI_IRQ_END; #else end = IPI_IRQ_START; #endif #endif for (int i = start; i <= end; i++) { /* * IPI handler */ /* * Use 0xdeadbeef as the argument value for irq 0, * if we used 0, the intr code will give the trap frame * pointer instead. */ arm_setup_irqhandler("ipi", ipi_handler, NULL, (void *)i, i, INTR_TYPE_MISC | INTR_EXCL, NULL); /* Enable ipi */ arm_unmask_irq(i); } #endif atomic_store_rel_int(&aps_ready, 1); /* Wake the other threads up */ #if __ARM_ARCH >= 7 armv7_sev(); #endif printf("Release APs\n"); for (loop_counter = 0; loop_counter < 2000; loop_counter++) { if (smp_started) return; DELAY(1000); } printf("AP's not started\n"); } SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL); struct cpu_group * cpu_topo(void) { return (smp_topo_1level(CG_SHARE_L2, mp_ncpus, 0)); } void cpu_mp_setmaxid(void) { platform_mp_setmaxid(); } /* Sending IPI */ void ipi_all_but_self(u_int ipi) { cpuset_t other_cpus; other_cpus = all_cpus; CPU_CLR(PCPU_GET(cpuid), &other_cpus); CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); pic_ipi_send(other_cpus, ipi); } void ipi_cpu(int cpu, u_int ipi) { cpuset_t cpus; CPU_ZERO(&cpus); CPU_SET(cpu, &cpus); CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi); pic_ipi_send(cpus, ipi); } void ipi_selected(cpuset_t cpus, u_int ipi) { CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); pic_ipi_send(cpus, ipi); } Index: head/sys/arm/broadcom/bcm2835/bcm2835_intr.c =================================================================== --- head/sys/arm/broadcom/bcm2835/bcm2835_intr.c (revision 296099) +++ head/sys/arm/broadcom/bcm2835/bcm2835_intr.c (revision 296100) @@ -1,242 +1,249 @@ /*- * Copyright (c) 2012 Damjan Marion * All rights reserved. * * Based on OMAP3 INTC code by Ben Gray * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SOC_BCM2836 #include #endif #define INTC_PENDING_BASIC 0x00 #define INTC_PENDING_BANK1 0x04 #define INTC_PENDING_BANK2 0x08 #define INTC_FIQ_CONTROL 0x0C #define INTC_ENABLE_BANK1 0x10 #define INTC_ENABLE_BANK2 0x14 #define INTC_ENABLE_BASIC 0x18 #define INTC_DISABLE_BANK1 0x1C #define INTC_DISABLE_BANK2 0x20 #define INTC_DISABLE_BASIC 0x24 #define BANK1_START 8 #define BANK1_END (BANK1_START + 32 - 1) #define BANK2_START (BANK1_START + 32) #define BANK2_END (BANK2_START + 32 - 1) #define BANK3_START (BANK2_START + 32) #define BANK3_END (BANK3_START + 32 - 1) #define IS_IRQ_BASIC(n) (((n) >= 0) && ((n) < BANK1_START)) #define IS_IRQ_BANK1(n) (((n) >= BANK1_START) && ((n) <= BANK1_END)) #define IS_IRQ_BANK2(n) (((n) >= BANK2_START) && ((n) <= BANK2_END)) #define ID_IRQ_BCM2836(n) (((n) >= BANK3_START) && ((n) <= BANK3_END)) #define IRQ_BANK1(n) ((n) - BANK1_START) #define IRQ_BANK2(n) ((n) - BANK2_START) #ifdef DEBUG #define dprintf(fmt, args...) printf(fmt, ##args) #else #define dprintf(fmt, args...) 
#endif struct bcm_intc_softc { device_t sc_dev; struct resource * intc_res; bus_space_tag_t intc_bst; bus_space_handle_t intc_bsh; }; static struct bcm_intc_softc *bcm_intc_sc = NULL; #define intc_read_4(_sc, reg) \ bus_space_read_4((_sc)->intc_bst, (_sc)->intc_bsh, (reg)) #define intc_write_4(_sc, reg, val) \ bus_space_write_4((_sc)->intc_bst, (_sc)->intc_bsh, (reg), (val)) static int bcm_intc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "broadcom,bcm2835-armctrl-ic")) return (ENXIO); device_set_desc(dev, "BCM2835 Interrupt Controller"); return (BUS_PROBE_DEFAULT); } static int bcm_intc_attach(device_t dev) { struct bcm_intc_softc *sc = device_get_softc(dev); int rid = 0; sc->sc_dev = dev; if (bcm_intc_sc) return (ENXIO); sc->intc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->intc_res == NULL) { device_printf(dev, "could not allocate memory resource\n"); return (ENXIO); } sc->intc_bst = rman_get_bustag(sc->intc_res); sc->intc_bsh = rman_get_bushandle(sc->intc_res); bcm_intc_sc = sc; return (0); } static device_method_t bcm_intc_methods[] = { DEVMETHOD(device_probe, bcm_intc_probe), DEVMETHOD(device_attach, bcm_intc_attach), { 0, 0 } }; static driver_t bcm_intc_driver = { "intc", bcm_intc_methods, sizeof(struct bcm_intc_softc), }; static devclass_t bcm_intc_devclass; DRIVER_MODULE(intc, simplebus, bcm_intc_driver, bcm_intc_devclass, 0, 0); int arm_get_next_irq(int last_irq) { struct bcm_intc_softc *sc = bcm_intc_sc; uint32_t pending; int32_t irq = last_irq + 1; #ifdef SOC_BCM2836 int ret; #endif /* Sanity check */ if (irq < 0) irq = 0; #ifdef SOC_BCM2836 if ((ret = bcm2836_get_next_irq(irq)) < 0) return (-1); if (ret != BCM2836_GPU_IRQ) return (ret + BANK3_START); #endif /* TODO: should we mask last_irq? 
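* The banks are scanned in order (basic, then bank 1, then bank 2) and the first pending source at or above irq is returned.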
*/ if (irq < BANK1_START) { pending = intc_read_4(sc, INTC_PENDING_BASIC); if ((pending & 0xFF) == 0) { irq = BANK1_START; /* skip to next bank */ } else do { if (pending & (1 << irq)) return irq; irq++; } while (irq < BANK1_START); } if (irq < BANK2_START) { pending = intc_read_4(sc, INTC_PENDING_BANK1); if (pending == 0) { irq = BANK2_START; /* skip to next bank */ } else do { if (pending & (1 << IRQ_BANK1(irq))) return irq; irq++; } while (irq < BANK2_START); } if (irq < BANK3_START) { pending = intc_read_4(sc, INTC_PENDING_BANK2); if (pending != 0) do { if (pending & (1 << IRQ_BANK2(irq))) return irq; irq++; } while (irq < BANK3_START); } return (-1); } void arm_mask_irq(uintptr_t nb) { struct bcm_intc_softc *sc = bcm_intc_sc; dprintf("%s: %d\n", __func__, nb); if (IS_IRQ_BASIC(nb)) intc_write_4(sc, INTC_DISABLE_BASIC, (1 << nb)); else if (IS_IRQ_BANK1(nb)) intc_write_4(sc, INTC_DISABLE_BANK1, (1 << IRQ_BANK1(nb))); else if (IS_IRQ_BANK2(nb)) intc_write_4(sc, INTC_DISABLE_BANK2, (1 << IRQ_BANK2(nb))); #ifdef SOC_BCM2836 else if (ID_IRQ_BCM2836(nb)) bcm2836_mask_irq(nb - BANK3_START); #endif else printf("arm_mask_irq: Invalid IRQ number: %d\n", nb); } void arm_unmask_irq(uintptr_t nb) { struct bcm_intc_softc *sc = bcm_intc_sc; dprintf("%s: %d\n", __func__, nb); if (IS_IRQ_BASIC(nb)) intc_write_4(sc, INTC_ENABLE_BASIC, (1 << nb)); else if (IS_IRQ_BANK1(nb)) intc_write_4(sc, INTC_ENABLE_BANK1, (1 << IRQ_BANK1(nb))); else if (IS_IRQ_BANK2(nb)) intc_write_4(sc, INTC_ENABLE_BANK2, (1 << IRQ_BANK2(nb))); #ifdef SOC_BCM2836 else if (ID_IRQ_BCM2836(nb)) bcm2836_unmask_irq(nb - BANK3_START); #endif else printf("arm_mask_irq: Invalid IRQ number: %d\n", nb); } + +#ifdef SMP +void +intr_pic_init_secondary(void) +{ +} +#endif Index: head/sys/arm/broadcom/bcm2835/bcm2836_mp.c =================================================================== --- head/sys/arm/broadcom/bcm2835/bcm2836_mp.c (revision 296099) +++ head/sys/arm/broadcom/bcm2835/bcm2836_mp.c (revision 296100) @@ -1,184 +1,178 @@ /*- * Copyright (C) 2015 Daisuke Aoyama * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEBUG #define DPRINTF(fmt, ...) do { \ printf("%s:%u: ", __func__, __LINE__); \ printf(fmt, ##__VA_ARGS__); \ } while (0) #else #define DPRINTF(fmt, ...) #endif #define ARM_LOCAL_BASE 0x40000000 #define ARM_LOCAL_SIZE 0x00001000 /* mailbox registers */ #define MBOXINTRCTRL_CORE(n) (0x00000050 + (0x04 * (n))) #define MBOX0SET_CORE(n) (0x00000080 + (0x10 * (n))) #define MBOX1SET_CORE(n) (0x00000084 + (0x10 * (n))) #define MBOX2SET_CORE(n) (0x00000088 + (0x10 * (n))) #define MBOX3SET_CORE(n) (0x0000008C + (0x10 * (n))) #define MBOX0CLR_CORE(n) (0x000000C0 + (0x10 * (n))) #define MBOX1CLR_CORE(n) (0x000000C4 + (0x10 * (n))) #define MBOX2CLR_CORE(n) (0x000000C8 + (0x10 * (n))) #define MBOX3CLR_CORE(n) (0x000000CC + (0x10 * (n))) static bus_space_handle_t bs_periph; #define BSRD4(addr) \ bus_space_read_4(fdtbus_bs_tag, bs_periph, (addr)) #define BSWR4(addr, val) \ bus_space_write_4(fdtbus_bs_tag, bs_periph, (addr), (val)) void -platform_mp_init_secondary(void) -{ - -} - -void platform_mp_setmaxid(void) { DPRINTF("platform_mp_setmaxid\n"); if (mp_ncpus != 0) return; mp_ncpus = 4; mp_maxid = mp_ncpus - 1; DPRINTF("mp_maxid=%d\n", mp_maxid); } void platform_mp_start_ap(void) { uint32_t val; int i, retry; DPRINTF("platform_mp_start_ap\n"); /* initialize */ if (bus_space_map(fdtbus_bs_tag, ARM_LOCAL_BASE, ARM_LOCAL_SIZE, 0, &bs_periph) != 0) panic("can't map local peripheral\n"); for (i = 0; i < mp_ncpus; i++) { /* clear mailbox 0/3 */ BSWR4(MBOX0CLR_CORE(i), 0xffffffff); BSWR4(MBOX3CLR_CORE(i), 0xffffffff); } wmb(); dcache_wbinv_poc_all(); /* boot secondary CPUs */ for (i = 1; i < mp_ncpus; i++) { /* set entry point to mailbox 3 */ BSWR4(MBOX3SET_CORE(i), (uint32_t)pmap_kextract((vm_offset_t)mpentry)); wmb(); /* wait for bootup */ retry = 1000; do { /* check entry point */ val = BSRD4(MBOX3CLR_CORE(i)); if (val == 0) break; DELAY(100); retry--; if (retry <= 0) { printf("can't start for CPU%d\n", i); break; } } while (1); /* dsb and sev */ armv7_sev(); /* recode AP in CPU map */ CPU_SET(i, &all_cpus); } } void pic_ipi_send(cpuset_t cpus, u_int ipi) { int i; dsb(); for (i = 0; i < mp_ncpus; i++) { if (CPU_ISSET(i, &cpus)) BSWR4(MBOX0SET_CORE(i), 1 << ipi); } wmb(); } int pic_ipi_read(int i) { uint32_t val; int cpu, ipi; cpu = PCPU_GET(cpuid); dsb(); if (i != -1) { val = BSRD4(MBOX0CLR_CORE(cpu)); if (val == 0) return (0); ipi = ffs(val) - 1; BSWR4(MBOX0CLR_CORE(cpu), 1 << ipi); dsb(); return (ipi); } return (0x3ff); } void pic_ipi_clear(int ipi) { } Index: head/sys/arm/freescale/imx/imx6_mp.c =================================================================== --- head/sys/arm/freescale/imx/imx6_mp.c (revision 296099) +++ head/sys/arm/freescale/imx/imx6_mp.c (revision 296100) @@ -1,164 +1,157 @@ /*- * Copyright (c) 2014 Juergen Weiss * Copyright (c) 2014 Ian Lepore * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #define SCU_PHYSBASE 0x00a00000 #define SCU_SIZE 0x00001000 #define SCU_CONTROL_REG 0x00 #define SCU_CONTROL_ENABLE (1 << 0) #define SCU_CONFIG_REG 0x04 #define SCU_CONFIG_REG_NCPU_MASK 0x03 #define SCU_CPUPOWER_REG 0x08 #define SCU_INV_TAGS_REG 0x0c #define SCU_DIAG_CONTROL 0x30 #define SCU_DIAG_DISABLE_MIGBIT (1 << 0) #define SCU_FILTER_START_REG 0x40 #define SCU_FILTER_END_REG 0x44 #define SCU_SECURE_ACCESS_REG 0x50 #define SCU_NONSECURE_ACCESS_REG 0x54 #define SRC_PHYSBASE 0x020d8000 #define SRC_SIZE 0x4000 #define SRC_CONTROL_REG 0x00 #define SRC_CONTROL_C1ENA_SHIFT 22 /* Bit for Core 1 enable */ #define SRC_CONTROL_C1RST_SHIFT 14 /* Bit for Core 1 reset */ #define SRC_GPR0_C1FUNC 0x20 /* Register for Core 1 entry func */ #define SRC_GPR1_C1ARG 0x24 /* Register for Core 1 entry arg */ void -platform_mp_init_secondary(void) -{ - - intr_pic_init_secondary(); -} - -void platform_mp_setmaxid(void) { bus_space_handle_t scu; int hwcpu, ncpu; uint32_t val; /* If we've already set the global vars don't bother to do it again. */ if (mp_ncpus != 0) return; if (bus_space_map(fdtbus_bs_tag, SCU_PHYSBASE, SCU_SIZE, 0, &scu) != 0) panic("Couldn't map the SCU\n"); val = bus_space_read_4(fdtbus_bs_tag, scu, SCU_CONFIG_REG); hwcpu = (val & SCU_CONFIG_REG_NCPU_MASK) + 1; bus_space_unmap(fdtbus_bs_tag, scu, SCU_SIZE); ncpu = hwcpu; TUNABLE_INT_FETCH("hw.ncpu", &ncpu); if (ncpu < 1 || ncpu > hwcpu) ncpu = hwcpu; mp_ncpus = ncpu; mp_maxid = ncpu - 1; } void platform_mp_start_ap(void) { bus_space_handle_t scu; bus_space_handle_t src; uint32_t val; int i; if (bus_space_map(fdtbus_bs_tag, SCU_PHYSBASE, SCU_SIZE, 0, &scu) != 0) panic("Couldn't map the SCU\n"); if (bus_space_map(fdtbus_bs_tag, SRC_PHYSBASE, SRC_SIZE, 0, &src) != 0) panic("Couldn't map the system reset controller (SRC)\n"); /* * Invalidate SCU cache tags. The 0x0000ffff constant invalidates all * ways on all cores 0-3. Per the ARM docs, it's harmless to write to * the bits for cores that are not present. */ bus_space_write_4(fdtbus_bs_tag, scu, SCU_INV_TAGS_REG, 0x0000ffff); /* * Erratum ARM/MP: 764369 (problems with cache maintenance). * Setting the "disable-migratory bit" in the undocumented SCU * Diagnostic Control Register helps work around the problem. */ val = bus_space_read_4(fdtbus_bs_tag, scu, SCU_DIAG_CONTROL); bus_space_write_4(fdtbus_bs_tag, scu, SCU_DIAG_CONTROL, val | SCU_DIAG_DISABLE_MIGBIT); /* * Enable the SCU, then clean the cache on this core. After these two * operations the cache tag ram in the SCU is coherent with the contents * of the cache on this core. 
The other cores aren't running yet so * their caches can't contain valid data yet, but we've initialized * their SCU tag ram above, so they will be coherent from startup. */ val = bus_space_read_4(fdtbus_bs_tag, scu, SCU_CONTROL_REG); bus_space_write_4(fdtbus_bs_tag, scu, SCU_CONTROL_REG, val | SCU_CONTROL_ENABLE); dcache_wbinv_poc_all(); /* * For each AP core, set the entry point address and argument registers, * and set the core-enable and core-reset bits in the control register. */ val = bus_space_read_4(fdtbus_bs_tag, src, SRC_CONTROL_REG); for (i=1; i < mp_ncpus; i++) { bus_space_write_4(fdtbus_bs_tag, src, SRC_GPR0_C1FUNC + 8*i, pmap_kextract((vm_offset_t)mpentry)); bus_space_write_4(fdtbus_bs_tag, src, SRC_GPR1_C1ARG + 8*i, 0); val |= ((1 << (SRC_CONTROL_C1ENA_SHIFT - 1 + i )) | ( 1 << (SRC_CONTROL_C1RST_SHIFT - 1 + i))); } bus_space_write_4(fdtbus_bs_tag, src, SRC_CONTROL_REG, val); armv7_sev(); bus_space_unmap(fdtbus_bs_tag, scu, SCU_SIZE); bus_space_unmap(fdtbus_bs_tag, src, SRC_SIZE); } Index: head/sys/arm/include/smp.h =================================================================== --- head/sys/arm/include/smp.h (revision 296099) +++ head/sys/arm/include/smp.h (revision 296100) @@ -1,54 +1,53 @@ /* $FreeBSD$ */ #ifndef _MACHINE_SMP_H_ #define _MACHINE_SMP_H_ #include #include #ifdef ARM_INTRNG enum { IPI_AST, IPI_PREEMPT, IPI_RENDEZVOUS, IPI_STOP, IPI_STOP_HARD = IPI_STOP, /* These are synonyms on arm. */ IPI_HARDCLOCK, IPI_TLB, /* Not used now, but keep it reserved. */ IPI_CACHE, /* Not used now, but keep it reserved. */ INTR_IPI_COUNT }; #else #define IPI_AST 0 #define IPI_PREEMPT 2 #define IPI_RENDEZVOUS 3 #define IPI_STOP 4 #define IPI_STOP_HARD 4 #define IPI_HARDCLOCK 6 #define IPI_TLB 7 /* Not used now, but keep it reserved. */ #define IPI_CACHE 8 /* Not used now, but keep it reserved. */ #endif /* INTRNG */ void init_secondary(int cpu); void mpentry(void); void ipi_all_but_self(u_int ipi); void ipi_cpu(int cpu, u_int ipi); void ipi_selected(cpuset_t cpus, u_int ipi); /* PIC interface */ void pic_ipi_send(cpuset_t cpus, u_int ipi); #ifndef ARM_INTRNG void pic_ipi_clear(int ipi); int pic_ipi_read(int arg); #endif /* Platform interface */ void platform_mp_setmaxid(void); void platform_mp_start_ap(void); -void platform_mp_init_secondary(void); /* global data in mp_machdep.c */ extern struct pcb stoppcbs[]; #endif /* !_MACHINE_SMP_H_ */ Index: head/sys/arm/mv/armada38x/armada38x_mp.c =================================================================== --- head/sys/arm/mv/armada38x/armada38x_mp.c (revision 296099) +++ head/sys/arm/mv/armada38x/armada38x_mp.c (revision 296100) @@ -1,152 +1,145 @@ /*- * Copyright (c) 2015 Semihalf. * Copyright (c) 2015 Stormshield. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
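The AP loop above packs one enable bit and one reset bit per core into the SRC control register, offsetting from the core-1 shifts, and places each core's entry and argument registers 8 bytes apart. A tiny host-side check of that bit arithmetic (pure computation, no hardware access) is:

/*
 * Core i (1..3) gets its enable bit at SRC_CONTROL_C1ENA_SHIFT - 1 + i and
 * its reset bit at SRC_CONTROL_C1RST_SHIFT - 1 + i, so core 1 lands exactly
 * on the documented C1 bits.
 */
#include <stdio.h>
#include <stdint.h>

#define SRC_CONTROL_C1ENA_SHIFT 22
#define SRC_CONTROL_C1RST_SHIFT 14

int
main(void)
{
        uint32_t val = 0;
        int i;

        for (i = 1; i < 4; i++)
                val |= (1u << (SRC_CONTROL_C1ENA_SHIFT - 1 + i)) |
                    (1u << (SRC_CONTROL_C1RST_SHIFT - 1 + i));
        printf("SRC_CONTROL mask for cores 1-3: %#010x\n", (unsigned)val);
        return (0);
}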
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include "pmsu.h" int cpu_reset_deassert(void); int cpu_reset_deassert(void) { bus_space_handle_t vaddr; uint32_t reg; int rv; rv = bus_space_map(fdtbus_bs_tag, (bus_addr_t)MV_CPU_RESET_BASE, MV_CPU_RESET_REGS_LEN, 0, &vaddr); if (rv != 0) return (rv); /* CPU1 is held at reset by default - clear assert bit to release it */ reg = bus_space_read_4(fdtbus_bs_tag, vaddr, CPU_RESET_OFFSET(1)); reg &= ~CPU_RESET_ASSERT; bus_space_write_4(fdtbus_bs_tag, vaddr, CPU_RESET_OFFSET(1), reg); bus_space_unmap(fdtbus_bs_tag, vaddr, MV_CPU_RESET_REGS_LEN); return (0); } static int platform_cnt_cpus(void) { bus_space_handle_t vaddr_scu; phandle_t cpus_node, child; char device_type[16]; int fdt_cpu_count = 0; int reg_cpu_count = 0; uint32_t val; int rv; cpus_node = OF_finddevice("/cpus"); if (cpus_node == -1) { /* Default is one core */ mp_ncpus = 1; return (0); } /* Get number of 'cpu' nodes from FDT */ for (child = OF_child(cpus_node); child != 0; child = OF_peer(child)) { /* Check if child is a CPU */ memset(device_type, 0, sizeof(device_type)); rv = OF_getprop(child, "device_type", device_type, sizeof(device_type) - 1); if (rv < 0) continue; if (strcmp(device_type, "cpu") != 0) continue; fdt_cpu_count++; } /* Get number of CPU cores from SCU register to cross-check with FDT */ rv = bus_space_map(fdtbus_bs_tag, (bus_addr_t)MV_SCU_BASE, MV_SCU_REGS_LEN, 0, &vaddr_scu); if (rv != 0) { /* Default is one core */ mp_ncpus = 1; return (0); } val = bus_space_read_4(fdtbus_bs_tag, vaddr_scu, MV_SCU_REG_CONFIG); bus_space_unmap(fdtbus_bs_tag, vaddr_scu, MV_SCU_REGS_LEN); reg_cpu_count = (val & SCU_CFG_REG_NCPU_MASK) + 1; /* Set mp_ncpus to number of cpus in FDT unless SOC contains only one */ mp_ncpus = min(reg_cpu_count, fdt_cpu_count); /* mp_ncpus must be at least 1 */ mp_ncpus = max(1, mp_ncpus); return (mp_ncpus); } void platform_mp_setmaxid(void) { /* Armada38x family supports maximum 2 cores */ mp_ncpus = platform_cnt_cpus(); mp_maxid = 1; } void -platform_mp_init_secondary(void) -{ - - intr_pic_init_secondary(); -} - -void platform_mp_start_ap(void) { int rv; /* Write secondary entry address to PMSU register */ rv = pmsu_boot_secondary_cpu(); if (rv != 0) return; /* Release CPU1 from reset */ cpu_reset_deassert(); } Index: head/sys/arm/mv/armadaxp/armadaxp_mp.c =================================================================== --- head/sys/arm/mv/armadaxp/armadaxp_mp.c (revision 296099) +++ head/sys/arm/mv/armadaxp/armadaxp_mp.c (revision 296100) @@ -1,181 +1,176 @@ /*- * Copyright (c) 2011 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MV_AXP_CPU_DIVCLK_BASE (MV_BASE + 0x18700) #define CPU_DIVCLK_CTRL0 0x00 #define CPU_DIVCLK_CTRL2_RATIO_FULL0 0x08 #define CPU_DIVCLK_CTRL2_RATIO_FULL1 0x0c #define CPU_DIVCLK_MASK(x) (~(0xff << (8 * (x)))) #define CPU_PMU(x) (MV_BASE + 0x22100 + (0x100 * (x))) #define CPU_PMU_BOOT 0x24 #define MP (MV_BASE + 0x20800) #define MP_SW_RESET(x) ((x) * 8) #define CPU_RESUME_CONTROL (0x20988) void armadaxp_init_coher_fabric(void); int platform_get_ncpus(void); /* Coherency Fabric registers */ static uint32_t read_cpu_clkdiv(uint32_t reg) { return (bus_space_read_4(fdtbus_bs_tag, MV_AXP_CPU_DIVCLK_BASE, reg)); } static void write_cpu_clkdiv(uint32_t reg, uint32_t val) { bus_space_write_4(fdtbus_bs_tag, MV_AXP_CPU_DIVCLK_BASE, reg, val); } void platform_mp_setmaxid(void) { mp_ncpus = platform_get_ncpus(); mp_maxid = mp_ncpus - 1; } -void -platform_mp_init_secondary(void) -{ -} - void mptramp(void); void mptramp_end(void); extern vm_offset_t mptramp_pmu_boot; void platform_mp_start_ap(void) { uint32_t reg, *src, *dst, cpu_num, div_val, cputype; vm_offset_t pmu_boot_off; /* * Initialization procedure depends on core revision, * in this step CHIP ID is checked to choose proper procedure */ cputype = cpu_ident(); cputype &= CPU_ID_CPU_MASK; /* * Set the PA of CPU0 Boot Address Redirect register used in * mptramp according to the actual SoC registers' base address. 
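The clock-divider helpers above treat CPU_DIVCLK_CTRL2_RATIO_FULL* as one byte lane per CPU: CPU_DIVCLK_MASK() clears a CPU's byte and the new ratio is shifted into the same position, which is exactly what the loops further down do. A self-contained illustration of that byte-lane update, with the register modelled as a plain variable, is:

/*
 * Each CPU owns one byte of the ratio register: mask the old byte out,
 * shift the new divider value into place.
 */
#include <stdio.h>
#include <stdint.h>

#define CPU_DIVCLK_MASK(x)      (~(0xffu << (8 * (x))))

static uint32_t
set_cpu_ratio(uint32_t reg, int cpu, uint32_t div_val)
{
        reg &= CPU_DIVCLK_MASK(cpu);    /* clear this CPU's byte */
        reg |= div_val << (cpu * 8);    /* install the new ratio */
        return (reg);
}

int
main(void)
{
        uint32_t reg = 0x11223344;

        reg = set_cpu_ratio(reg, 2, 0x3f);      /* byte 2 replaced by 0x3f */
        printf("CTRL2_RATIO_FULL = %#010x\n", (unsigned)reg);
        return (0);
}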
*/ pmu_boot_off = (CPU_PMU(0) - MV_BASE) + CPU_PMU_BOOT; mptramp_pmu_boot = fdt_immr_pa + pmu_boot_off; dst = pmap_mapdev(0xffff0000, PAGE_SIZE); for (src = (uint32_t *)mptramp; src < (uint32_t *)mptramp_end; src++, dst++) { *dst = *src; } pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE); if (cputype == CPU_ID_MV88SV584X_V7) { /* Core rev A0 */ div_val = read_cpu_clkdiv(CPU_DIVCLK_CTRL2_RATIO_FULL1); div_val &= 0x3f; for (cpu_num = 1; cpu_num < mp_ncpus; cpu_num++ ) { reg = read_cpu_clkdiv(CPU_DIVCLK_CTRL2_RATIO_FULL1); reg &= CPU_DIVCLK_MASK(cpu_num); reg |= div_val << (cpu_num * 8); write_cpu_clkdiv(CPU_DIVCLK_CTRL2_RATIO_FULL1, reg); } } else { /* Core rev Z1 */ div_val = 0x01; if (mp_ncpus > 1) { reg = read_cpu_clkdiv(CPU_DIVCLK_CTRL2_RATIO_FULL0); reg &= CPU_DIVCLK_MASK(3); reg |= div_val << 24; write_cpu_clkdiv(CPU_DIVCLK_CTRL2_RATIO_FULL0, reg); } for (cpu_num = 2; cpu_num < mp_ncpus; cpu_num++ ) { reg = read_cpu_clkdiv(CPU_DIVCLK_CTRL2_RATIO_FULL1); reg &= CPU_DIVCLK_MASK(cpu_num); reg |= div_val << (cpu_num * 8); write_cpu_clkdiv(CPU_DIVCLK_CTRL2_RATIO_FULL1, reg); } } reg = read_cpu_clkdiv(CPU_DIVCLK_CTRL0); reg |= ((0x1 << (mp_ncpus - 1)) - 1) << 21; write_cpu_clkdiv(CPU_DIVCLK_CTRL0, reg); reg = read_cpu_clkdiv(CPU_DIVCLK_CTRL0); reg |= 0x01000000; write_cpu_clkdiv(CPU_DIVCLK_CTRL0, reg); DELAY(100); reg &= ~(0xf << 21); write_cpu_clkdiv(CPU_DIVCLK_CTRL0, reg); DELAY(100); bus_space_write_4(fdtbus_bs_tag, MV_BASE, CPU_RESUME_CONTROL, 0); for (cpu_num = 1; cpu_num < mp_ncpus; cpu_num++ ) bus_space_write_4(fdtbus_bs_tag, CPU_PMU(cpu_num), CPU_PMU_BOOT, pmap_kextract((vm_offset_t)mpentry)); dcache_wbinv_poc_all(); for (cpu_num = 1; cpu_num < mp_ncpus; cpu_num++ ) bus_space_write_4(fdtbus_bs_tag, MP, MP_SW_RESET(cpu_num), 0); /* XXX: Temporary workaround for hangup after releasing AP's */ wmb(); DELAY(10); armadaxp_init_coher_fabric(); } Index: head/sys/arm/mv/mpic.c =================================================================== --- head/sys/arm/mv/mpic.c (revision 296099) +++ head/sys/arm/mv/mpic.c (revision 296100) @@ -1,398 +1,403 @@ /*- * Copyright (c) 2006 Benno Rice. * Copyright (C) 2007-2011 MARVELL INTERNATIONAL LTD. * Copyright (c) 2012 Semihalf. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
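To make the tail of platform_mp_start_ap() above easier to follow, here is an assumed host-side walk-through of the release order: clear the resume control, write the physical entry point into each AP's PMU boot register, flush caches, then drop each core's software reset. regwr() only logs what bus_space_write_4() would do, and the MV_BASE value and entry address are illustrative, not the kernel's.

/*
 * Logged model of the Armada XP release sequence; no hardware is touched.
 */
#include <stdio.h>
#include <stdint.h>

#define MV_BASE                 0xf1000000u     /* illustrative base only */
#define CPU_PMU(x)              (MV_BASE + 0x22100 + (0x100 * (x)))
#define CPU_PMU_BOOT            0x24
#define MP                      (MV_BASE + 0x20800)
#define MP_SW_RESET(x)          ((x) * 8)
#define CPU_RESUME_CONTROL      0x20988

static void
regwr(uint32_t base, uint32_t off, uint32_t val)
{
        printf("write %#010x <- %#010x\n", (unsigned)(base + off),
            (unsigned)val);
}

int
main(void)
{
        uint32_t entry = 0x00100000;    /* pretend pmap_kextract(mpentry) */
        int ncpus = 4, cpu;

        regwr(MV_BASE, CPU_RESUME_CONTROL, 0);
        for (cpu = 1; cpu < ncpus; cpu++)
                regwr(CPU_PMU(cpu), CPU_PMU_BOOT, entry);
        /* dcache_wbinv_poc_all() would run here so the APs see the data. */
        for (cpu = 1; cpu < ncpus; cpu++)
                regwr(MP, MP_SW_RESET(cpu), 0);
        return (0);
}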
* * from: FreeBSD: //depot/projects/arm/src/sys/arm/xscale/pxa2x0/pxa2x0_icu.c, rev 1 * from: FreeBSD: src/sys/arm/mv/ic.c,v 1.5 2011/02/08 01:49:30 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEBUG #define debugf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif #define MPIC_INT_ERR 4 #define MPIC_INT_MSI 96 #define IRQ_MASK 0x3ff #define MPIC_CTRL 0x0 #define MPIC_SOFT_INT 0x4 #define MPIC_SOFT_INT_DRBL1 (1 << 5) #define MPIC_ERR_CAUSE 0x20 #define MPIC_ISE 0x30 #define MPIC_ICE 0x34 #define MPIC_IN_DRBL 0x78 #define MPIC_IN_DRBL_MASK 0x7c #define MPIC_CTP 0xb0 #define MPIC_CTP 0xb0 #define MPIC_IIACK 0xb4 #define MPIC_ISM 0xb8 #define MPIC_ICM 0xbc #define MPIC_ERR_MASK 0xec0 struct mv_mpic_softc { device_t sc_dev; struct resource * mpic_res[3]; bus_space_tag_t mpic_bst; bus_space_handle_t mpic_bsh; bus_space_tag_t cpu_bst; bus_space_handle_t cpu_bsh; bus_space_tag_t drbl_bst; bus_space_handle_t drbl_bsh; }; static struct resource_spec mv_mpic_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_MEMORY, 1, RF_ACTIVE }, { SYS_RES_MEMORY, 2, RF_ACTIVE }, { -1, 0 } }; static struct mv_mpic_softc *mv_mpic_sc = NULL; void mpic_send_ipi(int cpus, u_int ipi); static int mv_mpic_probe(device_t); static int mv_mpic_attach(device_t); uint32_t mv_mpic_get_cause(void); uint32_t mv_mpic_get_cause_err(void); uint32_t mv_mpic_get_msi(void); static void arm_mask_irq_err(uintptr_t); static void arm_unmask_irq_err(uintptr_t); static void arm_unmask_msi(void); #define MPIC_CPU_WRITE(softc, reg, val) \ bus_space_write_4((softc)->cpu_bst, (softc)->cpu_bsh, (reg), (val)) #define MPIC_CPU_READ(softc, reg) \ bus_space_read_4((softc)->cpu_bst, (softc)->cpu_bsh, (reg)) #define MPIC_DRBL_WRITE(softc, reg, val) \ bus_space_write_4((softc)->drbl_bst, (softc)->drbl_bsh, (reg), (val)) #define MPIC_DRBL_READ(softc, reg) \ bus_space_read_4((softc)->drbl_bst, (softc)->drbl_bsh, (reg)) static int mv_mpic_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "mrvl,mpic")) return (ENXIO); device_set_desc(dev, "Marvell Integrated Interrupt Controller"); return (0); } static int mv_mpic_attach(device_t dev) { struct mv_mpic_softc *sc; int error; sc = (struct mv_mpic_softc *)device_get_softc(dev); if (mv_mpic_sc != NULL) return (ENXIO); mv_mpic_sc = sc; sc->sc_dev = dev; error = bus_alloc_resources(dev, mv_mpic_spec, sc->mpic_res); if (error) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } sc->mpic_bst = rman_get_bustag(sc->mpic_res[0]); sc->mpic_bsh = rman_get_bushandle(sc->mpic_res[0]); sc->cpu_bst = rman_get_bustag(sc->mpic_res[1]); sc->cpu_bsh = rman_get_bushandle(sc->mpic_res[1]); sc->drbl_bst = rman_get_bustag(sc->mpic_res[2]); sc->drbl_bsh = rman_get_bushandle(sc->mpic_res[2]); bus_space_write_4(mv_mpic_sc->mpic_bst, mv_mpic_sc->mpic_bsh, MPIC_CTRL, 1); MPIC_CPU_WRITE(mv_mpic_sc, MPIC_CTP, 0); arm_unmask_msi(); return (0); } static device_method_t mv_mpic_methods[] = { DEVMETHOD(device_probe, mv_mpic_probe), DEVMETHOD(device_attach, mv_mpic_attach), { 0, 0 } }; static driver_t mv_mpic_driver = { "mpic", mv_mpic_methods, sizeof(struct mv_mpic_softc), }; static devclass_t mv_mpic_devclass; DRIVER_MODULE(mpic, simplebus, mv_mpic_driver, mv_mpic_devclass, 0, 0); int arm_get_next_irq(int last) { u_int irq, next = -1; irq = 
mv_mpic_get_cause() & IRQ_MASK; CTR2(KTR_INTR, "%s: irq:%#x", __func__, irq); if (irq != IRQ_MASK) { if (irq == MPIC_INT_ERR) irq = mv_mpic_get_cause_err(); if (irq == MPIC_INT_MSI) irq = mv_mpic_get_msi(); next = irq; } CTR3(KTR_INTR, "%s: last=%d, next=%d", __func__, last, next); return (next); } /* * XXX We can make arm_enable_irq to operate on ICE and then mask/unmask only * by ISM/ICM and remove access to ICE in masking operation */ void arm_mask_irq(uintptr_t nb) { MPIC_CPU_WRITE(mv_mpic_sc, MPIC_CTP, 1); if (nb < ERR_IRQ) { bus_space_write_4(mv_mpic_sc->mpic_bst, mv_mpic_sc->mpic_bsh, MPIC_ICE, nb); MPIC_CPU_WRITE(mv_mpic_sc, MPIC_ISM, nb); } else if (nb < MSI_IRQ) arm_mask_irq_err(nb); } static void arm_mask_irq_err(uintptr_t nb) { uint32_t mask; uint8_t bit_off; bit_off = nb - ERR_IRQ; mask = MPIC_CPU_READ(mv_mpic_sc, MPIC_ERR_MASK); mask &= ~(1 << bit_off); MPIC_CPU_WRITE(mv_mpic_sc, MPIC_ERR_MASK, mask); } void arm_unmask_irq(uintptr_t nb) { MPIC_CPU_WRITE(mv_mpic_sc, MPIC_CTP, 0); if (nb < ERR_IRQ) { bus_space_write_4(mv_mpic_sc->mpic_bst, mv_mpic_sc->mpic_bsh, MPIC_ISE, nb); MPIC_CPU_WRITE(mv_mpic_sc, MPIC_ICM, nb); } else if (nb < MSI_IRQ) arm_unmask_irq_err(nb); if (nb == 0) MPIC_CPU_WRITE(mv_mpic_sc, MPIC_IN_DRBL_MASK, 0xffffffff); } void arm_unmask_irq_err(uintptr_t nb) { uint32_t mask; uint8_t bit_off; bus_space_write_4(mv_mpic_sc->mpic_bst, mv_mpic_sc->mpic_bsh, MPIC_ISE, MPIC_INT_ERR); MPIC_CPU_WRITE(mv_mpic_sc, MPIC_ICM, MPIC_INT_ERR); bit_off = nb - ERR_IRQ; mask = MPIC_CPU_READ(mv_mpic_sc, MPIC_ERR_MASK); mask |= (1 << bit_off); MPIC_CPU_WRITE(mv_mpic_sc, MPIC_ERR_MASK, mask); } static void arm_unmask_msi(void) { arm_unmask_irq(MPIC_INT_MSI); } uint32_t mv_mpic_get_cause(void) { return (MPIC_CPU_READ(mv_mpic_sc, MPIC_IIACK)); } uint32_t mv_mpic_get_cause_err(void) { uint32_t err_cause; uint8_t bit_off; err_cause = bus_space_read_4(mv_mpic_sc->mpic_bst, mv_mpic_sc->mpic_bsh, MPIC_ERR_CAUSE); if (err_cause) bit_off = ffs(err_cause) - 1; else return (-1); debugf("%s: irq:%x cause:%x\n", __func__, bit_off, err_cause); return (ERR_IRQ + bit_off); } uint32_t mv_mpic_get_msi(void) { uint32_t cause; uint8_t bit_off; cause = MPIC_DRBL_READ(mv_mpic_sc, 0); if (cause) bit_off = ffs(cause) - 1; else return (-1); debugf("%s: irq:%x cause:%x\n", __func__, bit_off, cause); cause &= ~(1 << bit_off); MPIC_DRBL_WRITE(mv_mpic_sc, 0, cause); return (MSI_IRQ + bit_off); } int mv_msi_data(int irq, uint64_t *addr, uint32_t *data) { u_long phys, base, size; phandle_t node; int error; node = ofw_bus_get_node(mv_mpic_sc->sc_dev); /* Get physical addres of register space */ error = fdt_get_range(OF_parent(node), 0, &phys, &size); if (error) { printf("%s: Cannot get register physical address, err:%d", __func__, error); return (error); } /* Get offset of MPIC register space */ error = fdt_regsize(node, &base, &size); if (error) { printf("%s: Cannot get MPIC register offset, err:%d", __func__, error); return (error); } *addr = phys + base + MPIC_SOFT_INT; *data = MPIC_SOFT_INT_DRBL1 | irq; return (0); } #if defined(SMP) void +intr_pic_init_secondary(void) +{ +} + +void pic_ipi_send(cpuset_t cpus, u_int ipi) { uint32_t val, i; val = 0x00000000; for (i = 0; i < MAXCPU; i++) if (CPU_ISSET(i, &cpus)) val |= (1 << (8 + i)); val |= ipi; bus_space_write_4(mv_mpic_sc->mpic_bst, mv_mpic_sc->mpic_bsh, MPIC_SOFT_INT, val); } int pic_ipi_read(int i __unused) { uint32_t val; int ipi; val = MPIC_CPU_READ(mv_mpic_sc, MPIC_IN_DRBL); if (val) { ipi = ffs(val) - 1; MPIC_CPU_WRITE(mv_mpic_sc, MPIC_IN_DRBL, ~(1 << 
ipi)); return (ipi); } return (0x3ff); } void pic_ipi_clear(int ipi) { } #endif Index: head/sys/arm/qemu/virt_mp.c =================================================================== --- head/sys/arm/qemu/virt_mp.c (revision 296099) +++ head/sys/arm/qemu/virt_mp.c (revision 296100) @@ -1,99 +1,92 @@ /*- * Copyright (c) 2015 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include static int running_cpus; static boolean_t virt_maxid(u_int id, phandle_t node, u_int addr_cells, pcell_t *reg) { if (mp_maxid < id) mp_maxid = id; return (true); } void platform_mp_setmaxid(void) { mp_maxid = PCPU_GET(cpuid); mp_ncpus = ofw_cpu_early_foreach(virt_maxid, true); if (mp_ncpus < 1) mp_ncpus = 1; mp_ncpus = MIN(mp_ncpus, MAXCPU); } static boolean_t virt_start_ap(u_int id, phandle_t node, u_int addr_cells, pcell_t *reg) { int err; if (running_cpus >= mp_ncpus) return (false); running_cpus++; err = psci_cpu_on(*reg, pmap_kextract((vm_offset_t)mpentry), id); if (err != PSCI_RETVAL_SUCCESS) return (false); return (true); } void platform_mp_start_ap(void) { ofw_cpu_early_foreach(virt_start_ap, true); } - -void -platform_mp_init_secondary(void) -{ - - intr_pic_init_secondary(); -} Index: head/sys/arm/rockchip/rk30xx_mp.c =================================================================== --- head/sys/arm/rockchip/rk30xx_mp.c (revision 296099) +++ head/sys/arm/rockchip/rk30xx_mp.c (revision 296100) @@ -1,178 +1,171 @@ /*- * Copyright (c) 2014 Ganbold Tsagaankhuu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
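For the MPIC IPI path that ends above: pic_ipi_send() places the target CPUs in bits 8 and up of the soft-interrupt word with the IPI number in the low bits, and pic_ipi_read() recovers the lowest pending doorbell bit with ffs(). A minimal sketch of that encoding and decoding, using ordinary variables instead of the MPIC registers, is:

/*
 * Sender side builds the MPIC_SOFT_INT word; receiver side decodes the
 * lowest set doorbell bit.
 */
#include <stdio.h>
#include <stdint.h>
#include <strings.h>

#define MAXCPU 4

int
main(void)
{
        uint32_t soft_int = 0, drbl;
        uint32_t cpus = (1 << 1) | (1 << 2);    /* send to CPU1 and CPU2 */
        unsigned int ipi = 3;
        int i;

        for (i = 0; i < MAXCPU; i++)
                if (cpus & (1 << i))
                        soft_int |= 1 << (8 + i);       /* target CPU bits */
        soft_int |= ipi;                                /* IPI number */
        printf("MPIC_SOFT_INT word: %#010x\n", (unsigned)soft_int);

        /* Receiver side: doorbell register holds one bit per pending IPI. */
        drbl = 1 << ipi;
        printf("pic_ipi_read() would return %d\n", ffs(drbl) - 1);
        return (0);
}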
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #define SCU_PHYSBASE 0x1013c000 #define SCU_SIZE 0x100 #define SCU_CONTROL_REG 0x00 #define SCU_CONTROL_ENABLE (1 << 0) #define SCU_STANDBY_EN (1 << 5) #define SCU_CONFIG_REG 0x04 #define SCU_CONFIG_REG_NCPU_MASK 0x03 #define SCU_CPUPOWER_REG 0x08 #define SCU_INV_TAGS_REG 0x0c #define SCU_FILTER_START_REG 0x10 #define SCU_FILTER_END_REG 0x14 #define SCU_SECURE_ACCESS_REG 0x18 #define SCU_NONSECURE_ACCESS_REG 0x1c #define IMEM_PHYSBASE 0x10080000 #define IMEM_SIZE 0x20 #define PMU_PHYSBASE 0x20004000 #define PMU_SIZE 0x100 #define PMU_PWRDN_CON 0x08 #define PMU_PWRDN_SCU (1 << 4) extern char *mpentry_addr; static void rk30xx_boot2(void); static void rk30xx_boot2(void) { __asm __volatile( "ldr pc, 1f\n" ".globl mpentry_addr\n" "mpentry_addr:\n" "1: .space 4\n"); } void -platform_mp_init_secondary(void) -{ - - intr_pic_init_secondary(); -} - -void platform_mp_setmaxid(void) { bus_space_handle_t scu; int ncpu; uint32_t val; if (mp_ncpus != 0) return; if (bus_space_map(fdtbus_bs_tag, SCU_PHYSBASE, SCU_SIZE, 0, &scu) != 0) panic("Could not map the SCU"); val = bus_space_read_4(fdtbus_bs_tag, scu, SCU_CONFIG_REG); ncpu = (val & SCU_CONFIG_REG_NCPU_MASK) + 1; bus_space_unmap(fdtbus_bs_tag, scu, SCU_SIZE); mp_ncpus = ncpu; mp_maxid = ncpu - 1; } void platform_mp_start_ap(void) { bus_space_handle_t scu; bus_space_handle_t imem; bus_space_handle_t pmu; uint32_t val; int i; if (bus_space_map(fdtbus_bs_tag, SCU_PHYSBASE, SCU_SIZE, 0, &scu) != 0) panic("Could not map the SCU"); if (bus_space_map(fdtbus_bs_tag, IMEM_PHYSBASE, IMEM_SIZE, 0, &imem) != 0) panic("Could not map the IMEM"); if (bus_space_map(fdtbus_bs_tag, PMU_PHYSBASE, PMU_SIZE, 0, &pmu) != 0) panic("Could not map the PMU"); /* * Invalidate SCU cache tags. The 0x0000ffff constant invalidates all * ways on all cores 0-3. Per the ARM docs, it's harmless to write to * the bits for cores that are not present. */ bus_space_write_4(fdtbus_bs_tag, scu, SCU_INV_TAGS_REG, 0x0000ffff); /* Make sure all cores except the first are off */ val = bus_space_read_4(fdtbus_bs_tag, pmu, PMU_PWRDN_CON); for (i = 1; i < mp_ncpus; i++) val |= 1 << i; bus_space_write_4(fdtbus_bs_tag, pmu, PMU_PWRDN_CON, val); /* Enable SCU power domain */ val = bus_space_read_4(fdtbus_bs_tag, pmu, PMU_PWRDN_CON); val &= ~PMU_PWRDN_SCU; bus_space_write_4(fdtbus_bs_tag, pmu, PMU_PWRDN_CON, val); /* Enable SCU */ val = bus_space_read_4(fdtbus_bs_tag, scu, SCU_CONTROL_REG); bus_space_write_4(fdtbus_bs_tag, scu, SCU_CONTROL_REG, val | SCU_CONTROL_ENABLE); /* * Cores will execute the code which resides at the start of * the on-chip bootram/sram after power-on. 
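The Rockchip code above drives AP power purely through PMU_PWRDN_CON: setting a core's bit holds it down, clearing PMU_PWRDN_SCU powers the SCU domain, and the bits are cleared again (below) to start the cores once the trampoline is in place. A plain-C model of that register dance, with a variable instead of the register and a core count assumed to be four, is:

/*
 * PMU_PWRDN_CON handling: force secondaries off, power the SCU domain,
 * then release the cores.
 */
#include <stdio.h>
#include <stdint.h>

#define PMU_PWRDN_SCU   (1 << 4)

int
main(void)
{
        uint32_t pwrdn_con = 0;
        int ncpus = 4, i;

        /* Make sure all cores except the first are off. */
        for (i = 1; i < ncpus; i++)
                pwrdn_con |= 1 << i;
        /* Enable the SCU power domain. */
        pwrdn_con &= ~PMU_PWRDN_SCU;
        printf("after power-down: %#06x\n", (unsigned)pwrdn_con);

        /* ...trampoline copied to SRAM, caches flushed... */

        /* Start all cores. */
        for (i = 1; i < ncpus; i++)
                pwrdn_con &= ~(1 << i);
        printf("after release:    %#06x\n", (unsigned)pwrdn_con);
        return (0);
}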
This sram region * should be reserved and the trampoline code that directs * the core to the real startup code in ram should be copied * into this sram region. * * First set boot function for the sram code. */ mpentry_addr = (char *)pmap_kextract((vm_offset_t)mpentry); /* Copy trampoline to sram, that runs during startup of the core */ bus_space_write_region_4(fdtbus_bs_tag, imem, 0, (uint32_t *)&rk30xx_boot2, 8); dcache_wbinv_poc_all(); /* Start all cores */ val = bus_space_read_4(fdtbus_bs_tag, pmu, PMU_PWRDN_CON); for (i = 1; i < mp_ncpus; i++) val &= ~(1 << i); bus_space_write_4(fdtbus_bs_tag, pmu, PMU_PWRDN_CON, val); armv7_sev(); bus_space_unmap(fdtbus_bs_tag, scu, SCU_SIZE); bus_space_unmap(fdtbus_bs_tag, imem, IMEM_SIZE); bus_space_unmap(fdtbus_bs_tag, pmu, PMU_SIZE); } Index: head/sys/arm/samsung/exynos/exynos5_mp.c =================================================================== --- head/sys/arm/samsung/exynos/exynos5_mp.c (revision 296099) +++ head/sys/arm/samsung/exynos/exynos5_mp.c (revision 296100) @@ -1,137 +1,130 @@ /*- * Copyright (c) 2013-2014 Ruslan Bukin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
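The trampoline copy above relies on an 8-word stub whose literal slot (mpentry_addr) is patched with the AP entry point before the stub is written into boot SRAM, so a core leaving power-down loads pc from that slot. The sketch below models SRAM as an array and uses the conventional "ldr pc, [pc, #-4]" encoding for the first word; it only demonstrates the patch-and-copy flow, not the real bus_space_write_region_4() path.

/*
 * Patch the literal slot with the entry point, then copy the stub into the
 * buffer standing in for boot SRAM.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int
main(void)
{
        uint32_t stub[8] = { 0xe51ff004 /* ldr pc, [pc, #-4] */, 0 };
        uint32_t sram[8];
        uint32_t entry = 0x00100000;    /* pretend pmap_kextract(mpentry) */

        stub[1] = entry;                        /* patch the literal slot */
        memcpy(sram, stub, sizeof(sram));       /* the write_region step */
        printf("SRAM word 0: %#010x, word 1: %#010x\n",
            (unsigned)sram[0], (unsigned)sram[1]);
        return (0);
}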
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #define EXYNOS_CHIPID 0x10000000 #define EXYNOS5250_SOC_ID 0x43520000 #define EXYNOS5420_SOC_ID 0xE5420000 #define EXYNOS5_SOC_ID_MASK 0xFFFFF000 #define EXYNOS_SYSRAM 0x02020000 #define EXYNOS5420_SYSRAM_NS (EXYNOS_SYSRAM + 0x53000 + 0x1c) #define EXYNOS_PMU_BASE 0x10040000 #define CORE_CONFIG(n) (0x2000 + (0x80 * (n))) #define CORE_STATUS(n) (CORE_CONFIG(n) + 0x4) #define CORE_PWR_EN 0x3 static int exynos_get_soc_id(void) { bus_addr_t chipid; int reg; if (bus_space_map(fdtbus_bs_tag, EXYNOS_CHIPID, 0x1000, 0, &chipid) != 0) panic("Couldn't map chipid\n"); reg = bus_space_read_4(fdtbus_bs_tag, chipid, 0x0); bus_space_unmap(fdtbus_bs_tag, chipid, 0x1000); return (reg & EXYNOS5_SOC_ID_MASK); } void -platform_mp_init_secondary(void) -{ - - intr_pic_init_secondary(); -} - -void platform_mp_setmaxid(void) { if (exynos_get_soc_id() == EXYNOS5420_SOC_ID) mp_ncpus = 4; else mp_ncpus = 2; mp_maxid = mp_ncpus - 1; } void platform_mp_start_ap(void) { bus_addr_t sysram, pmu; int err, i, j; int status; int reg; err = bus_space_map(fdtbus_bs_tag, EXYNOS_PMU_BASE, 0x20000, 0, &pmu); if (err != 0) panic("Couldn't map pmu\n"); if (exynos_get_soc_id() == EXYNOS5420_SOC_ID) reg = EXYNOS5420_SYSRAM_NS; else reg = EXYNOS_SYSRAM; err = bus_space_map(fdtbus_bs_tag, reg, 0x100, 0, &sysram); if (err != 0) panic("Couldn't map sysram\n"); /* Give power to CPUs */ for (i = 1; i < mp_ncpus; i++) { bus_space_write_4(fdtbus_bs_tag, pmu, CORE_CONFIG(i), CORE_PWR_EN); for (j = 10; j >= 0; j--) { status = bus_space_read_4(fdtbus_bs_tag, pmu, CORE_STATUS(i)); if ((status & CORE_PWR_EN) == CORE_PWR_EN) break; DELAY(10); if (j == 0) printf("Can't power on CPU%d\n", i); } } bus_space_write_4(fdtbus_bs_tag, sysram, 0x0, pmap_kextract((vm_offset_t)mpentry)); dcache_wbinv_poc_all(); armv7_sev(); bus_space_unmap(fdtbus_bs_tag, sysram, 0x100); bus_space_unmap(fdtbus_bs_tag, pmu, 0x20000); } Index: head/sys/arm/ti/omap4/omap4_mp.c =================================================================== --- head/sys/arm/ti/omap4/omap4_mp.c (revision 296099) +++ head/sys/arm/ti/omap4/omap4_mp.c (revision 296100) @@ -1,75 +1,69 @@ /*- * Copyright (c) 2012 Olivier Houchard. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
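The Exynos power-up above is a bounded poll: request power with CORE_PWR_EN, then sample CORE_STATUS a fixed number of times with a short delay before reporting failure. A self-contained version of that loop, with core_status() standing in for the register read and pretending the core comes up after a few polls, is:

/*
 * Bounded power-up poll: sample status up to eleven times, report failure
 * on the last attempt.
 */
#include <stdio.h>
#include <stdbool.h>

#define CORE_PWR_EN 0x3

/* Stand-in for the CORE_STATUS read; reports power-up after a few polls. */
static int
core_status(int attempt)
{
        return (attempt >= 3 ? CORE_PWR_EN : 0);
}

static bool
wait_core_powered(int cpu)
{
        int j, polls = 0;

        for (j = 10; j >= 0; j--) {
                if ((core_status(polls++) & CORE_PWR_EN) == CORE_PWR_EN)
                        return (true);
                /* DELAY(10) would go here. */
                if (j == 0)
                        printf("Can't power on CPU%d\n", cpu);
        }
        return (false);
}

int
main(void)
{
        printf("CPU1 powered: %s\n", wait_core_powered(1) ? "yes" : "no");
        return (0);
}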
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include void -platform_mp_init_secondary(void) -{ - intr_pic_init_secondary(); -} - -void platform_mp_setmaxid(void) { mp_maxid = 1; mp_ncpus = 2; } void platform_mp_start_ap(void) { bus_addr_t scu_addr; if (bus_space_map(fdtbus_bs_tag, 0x48240000, 0x1000, 0, &scu_addr) != 0) panic("Couldn't map the SCU\n"); /* Enable the SCU */ *(volatile unsigned int *)scu_addr |= 1; //*(volatile unsigned int *)(scu_addr + 0x30) |= 1; dcache_wbinv_poc_all(); ti_smc0(0x200, 0xfffffdff, MODIFY_AUX_CORE_0); ti_smc0(pmap_kextract((vm_offset_t)mpentry), 0, WRITE_AUX_CORE_1); armv7_sev(); bus_space_unmap(fdtbus_bs_tag, scu_addr, 0x1000); } Index: head/sys/arm/xilinx/zy7_mp.c =================================================================== --- head/sys/arm/xilinx/zy7_mp.c (revision 296099) +++ head/sys/arm/xilinx/zy7_mp.c (revision 296100) @@ -1,105 +1,98 @@ /*- * Copyright (c) 2013 Thomas Skibo. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #define ZYNQ7_CPU1_ENTRY 0xfffffff0 #define SCU_CONTROL_REG 0xf8f00000 #define SCU_CONTROL_ENABLE (1 << 0) void -platform_mp_init_secondary(void) -{ - - intr_pic_init_secondary(); -} - -void platform_mp_setmaxid(void) { mp_maxid = 1; mp_ncpus = 2; } void platform_mp_start_ap(void) { bus_space_handle_t scu_handle; bus_space_handle_t ocm_handle; uint32_t scu_ctrl; /* Map in SCU control register. */ if (bus_space_map(fdtbus_bs_tag, SCU_CONTROL_REG, 4, 0, &scu_handle) != 0) panic("platform_mp_start_ap: Couldn't map SCU config reg\n"); /* Set SCU enable bit. */ scu_ctrl = bus_space_read_4(fdtbus_bs_tag, scu_handle, 0); scu_ctrl |= SCU_CONTROL_ENABLE; bus_space_write_4(fdtbus_bs_tag, scu_handle, 0, scu_ctrl); bus_space_unmap(fdtbus_bs_tag, scu_handle, 4); /* Map in magic location to give entry address to CPU1. */ if (bus_space_map(fdtbus_bs_tag, ZYNQ7_CPU1_ENTRY, 4, 0, &ocm_handle) != 0) panic("platform_mp_start_ap: Couldn't map OCM\n"); /* Write start address for CPU1. 
*/ bus_space_write_4(fdtbus_bs_tag, ocm_handle, 0, pmap_kextract((vm_offset_t)mpentry)); bus_space_unmap(fdtbus_bs_tag, ocm_handle, 4); /* * The SCU is enabled above but I think the second CPU doesn't * turn on filtering until after the wake-up below. I think that's why * things don't work if I don't put these cache ops here. Also, the * magic location, 0xfffffff0, isn't in the SCU's filtering range so it * needs a write-back too. */ dcache_wbinv_poc_all(); /* Wake up CPU1. */ armv7_sev(); }
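Taken together, the platform files touched by this change share one bring-up skeleton: publish the physical address of mpentry at a SoC-specific location, clean and invalidate the data cache so the AP sees it, release or wake the core, and let mpentry hand off to init_secondary(). The condensed outline below is an assumed composite, not any one driver; every function is a placeholder that only logs the step.

/*
 * Composite outline of the AP bring-up flow; all helpers are placeholders.
 */
#include <stdio.h>
#include <stdint.h>

static void publish_entry(uint32_t pa)  { printf("boot reg <- %#x\n", (unsigned)pa); }
static void cache_flush(void)           { printf("dcache_wbinv_poc_all()\n"); }
static void release_core(int cpu)       { printf("release CPU%d\n", cpu); }
static void send_event(void)            { printf("armv7_sev()\n"); }

int
main(void)
{
        uint32_t mpentry_pa = 0x00100000;       /* made-up physical address */
        int ncpus = 2, cpu;

        publish_entry(mpentry_pa);
        cache_flush();
        for (cpu = 1; cpu < ncpus; cpu++)
                release_core(cpu);
        send_event();
        return (0);
}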