diff --git a/sys/mips/include/md_var.h b/sys/mips/include/md_var.h index ac2b7b692139..cfc13c950d4f 100644 --- a/sys/mips/include/md_var.h +++ b/sys/mips/include/md_var.h @@ -1,84 +1,85 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1995 Bruce D. Evans. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: src/sys/i386/include/md_var.h,v 1.35 2000/02/20 20:51:23 bsd * JNPR: md_var.h,v 1.4 2006/10/16 12:30:34 katta */ #ifndef _MACHINE_MD_VAR_H_ #define _MACHINE_MD_VAR_H_ #include /* * Miscellaneous machine-dependent declarations. */ extern long Maxmem; extern char cpu_board[]; extern char cpu_model[]; extern char sigcode[]; extern int szsigcode; #if defined(__mips_n32) || defined(__mips_n64) extern char sigcode32[]; extern int szsigcode32; #endif extern vm_offset_t kstack0; extern vm_offset_t kernel_kseg0_end; uint32_t MipsFPID(void); void MipsSaveCurFPState(struct thread *); void fork_trampoline(void); uintptr_t MipsEmulateBranch(struct trapframe *, uintptr_t, int, uintptr_t); void MipsSwitchFPState(struct thread *, struct trapframe *); int is_cacheable_mem(vm_paddr_t addr); void mips_wait(void); #define MIPS_DEBUG 0 #if MIPS_DEBUG #define MIPS_DEBUG_PRINT(fmt, args...) printf("%s: " fmt "\n" , __FUNCTION__ , ## args) #else #define MIPS_DEBUG_PRINT(fmt, args...) #endif void mips_vector_init(void); void mips_cpu_init(void); +void mips_hwrena_init(void); void mips_pcpu0_init(void); void mips_proc0_init(void); void mips_postboot_fixup(void); void cpu_identify(void); void cpu_switch_set_userlocal(void) __asm(__STRING(cpu_switch_set_userlocal)); struct dumperinfo; struct minidumpstate; int cpu_minidumpsys(struct dumperinfo *, const struct minidumpstate *); #endif /* !_MACHINE_MD_VAR_H_ */ diff --git a/sys/mips/mips/cpu.c b/sys/mips/mips/cpu.c index d504d6c34d14..fc8c9bddbc42 100644 --- a/sys/mips/mips/cpu.c +++ b/sys/mips/mips/cpu.c @@ -1,585 +1,600 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2004 Juli Mallett. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(CPU_CNMIPS) #include #include #endif struct mips_cpuinfo cpuinfo; #define _ENCODE_INSN(a,b,c,d,e) \ ((uint32_t)(((a) << 26)|((b) << 21)|((c) << 16)|((d) << 11)|(e))) #define _JR_RA _ENCODE_INSN(OP_SPECIAL, RA, 0, 0, OP_JR) #define _NOP 0 /* * Patch cpu_switch() by removing the UserLocal register code at the end. * For MIPS hardware that don't support UserLocal Register Implementation * we remove the instructions that update this register which may cause a * reserved instruction exception in the kernel. */ static void remove_userlocal_code(uint32_t *cpu_switch_code) { uint32_t *instructp; instructp = cpu_switch_code; instructp[0] = _JR_RA; instructp[1] = _NOP; } /* * Attempt to identify the MIPS CPU as much as possible. * * XXX: Assumes the CPU is MIPS{32,64}{,r2} compliant. * XXX: For now, skip config register selections 2 and 3 * as we don't currently use L2/L3 cache or additional * MIPS32 processor features. */ static void mips_get_identity(struct mips_cpuinfo *cpuinfo) { u_int32_t prid; u_int32_t cfg0; u_int32_t cfg1; u_int32_t cfg2; u_int32_t cfg3; #if defined(CPU_CNMIPS) u_int32_t cfg4; #endif u_int32_t tmp; memset(cpuinfo, 0, sizeof(struct mips_cpuinfo)); /* Read and store the PrID ID for CPU identification. */ prid = mips_rd_prid(); cpuinfo->cpu_vendor = MIPS_PRID_CID(prid); cpuinfo->cpu_rev = MIPS_PRID_REV(prid); cpuinfo->cpu_impl = MIPS_PRID_IMPL(prid); /* Read config register selection 0 to learn TLB type. */ cfg0 = mips_rd_config(); cpuinfo->tlb_type = ((cfg0 & MIPS_CONFIG0_MT_MASK) >> MIPS_CONFIG0_MT_SHIFT); cpuinfo->icache_virtual = cfg0 & MIPS_CONFIG0_VI; /* If config register selection 1 does not exist, return. */ if (!(cfg0 & MIPS_CONFIG0_M)) return; /* Learn TLB size and L1 cache geometry. */ cfg1 = mips_rd_config1(); /* Get the Config2 and Config3 registers as well. */ cfg2 = 0; cfg3 = 0; if (cfg1 & MIPS_CONFIG1_M) { cfg2 = mips_rd_config2(); if (cfg2 & MIPS_CONFIG2_M) cfg3 = mips_rd_config3(); } /* Save FP implementation revision if FP is present. 
 */
	if (cfg1 & MIPS_CONFIG1_FP)
		cpuinfo->fpu_id = MipsFPID();

	/* Check to see if UserLocal register is implemented. */
	if (cfg3 & MIPS_CONFIG3_ULR) {
-		/* UserLocal register is implemented, enable it. */
+		/*
+		 * UserLocal register is implemented, enable it later in
+		 * mips_hwrena_init.
+		 */
		cpuinfo->userlocal_reg = true;
-		tmp = mips_rd_hwrena();
-		mips_wr_hwrena(tmp | MIPS_HWRENA_UL);
	} else {
		/*
		 * UserLocal register is not implemented. Patch
		 * cpu_switch() and remove unsupported code.
		 */
		cpuinfo->userlocal_reg = false;
		remove_userlocal_code((uint32_t *)cpu_switch_set_userlocal);
	}

#if defined(CPU_NLM)
	/* Account for Extended TLB entries in XLP */
	tmp = mips_rd_config6();
	cpuinfo->tlb_nentries = ((tmp >> 16) & 0xffff) + 1;
#elif defined(BERI_LARGE_TLB)
	/* Check if we support extended TLB entries and if so activate. */
	tmp = mips_rd_config5();
#define	BERI_CP5_LTLB_SUPPORTED	0x1
	if (tmp & BERI_CP5_LTLB_SUPPORTED) {
		/* See how many extra TLB entries we have. */
		tmp = mips_rd_config6();
		cpuinfo->tlb_nentries = (tmp >> 16) + 1;
		/* Activate the extended entries. */
		mips_wr_config6(tmp|0x4);
	} else
#endif
#if !defined(CPU_NLM)
	cpuinfo->tlb_nentries =
	    ((cfg1 & MIPS_CONFIG1_TLBSZ_MASK) >> MIPS_CONFIG1_TLBSZ_SHIFT) + 1;
#endif
#if defined(CPU_CNMIPS)
	/* Add extended TLB size information from config4. */
	cfg4 = mips_rd_config4();
	if ((cfg4 & MIPS_CONFIG4_MMUEXTDEF) == MIPS_CONFIG4_MMUEXTDEF_MMUSIZEEXT)
		cpuinfo->tlb_nentries += (cfg4 & MIPS_CONFIG4_MMUSIZEEXT) * 0x40;
#endif

	/* L1 instruction cache. */
#ifdef MIPS_DISABLE_L1_CACHE
	cpuinfo->l1.ic_linesize = 0;
#else
	tmp = (cfg1 & MIPS_CONFIG1_IL_MASK) >> MIPS_CONFIG1_IL_SHIFT;
	if (tmp != 0) {
		cpuinfo->l1.ic_linesize = 1 << (tmp + 1);
		cpuinfo->l1.ic_nways =
		    (((cfg1 & MIPS_CONFIG1_IA_MASK) >> MIPS_CONFIG1_IA_SHIFT)) + 1;
		cpuinfo->l1.ic_nsets =
		    1 << (((cfg1 & MIPS_CONFIG1_IS_MASK) >> MIPS_CONFIG1_IS_SHIFT) + 6);
	}
#endif

	/* L1 data cache. */
#ifdef MIPS_DISABLE_L1_CACHE
	cpuinfo->l1.dc_linesize = 0;
#else
#ifndef CPU_CNMIPS
	tmp = (cfg1 & MIPS_CONFIG1_DL_MASK) >> MIPS_CONFIG1_DL_SHIFT;
	if (tmp != 0) {
		cpuinfo->l1.dc_linesize = 1 << (tmp + 1);
		cpuinfo->l1.dc_nways =
		    (((cfg1 & MIPS_CONFIG1_DA_MASK) >> MIPS_CONFIG1_DA_SHIFT)) + 1;
		cpuinfo->l1.dc_nsets =
		    1 << (((cfg1 & MIPS_CONFIG1_DS_MASK) >> MIPS_CONFIG1_DS_SHIFT) + 6);
	}
#else
	/*
	 * Some Octeon cache configuration parameters are by model family, not
	 * config1.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		/* Octeon and Octeon XL. */
		cpuinfo->l1.dc_nsets = 1;
		cpuinfo->l1.dc_nways = 64;
	} else if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		/* Octeon Plus. */
		cpuinfo->l1.dc_nsets = 2;
		cpuinfo->l1.dc_nways = 64;
	} else if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		/* Octeon II. */
		cpuinfo->l1.dc_nsets = 8;
		cpuinfo->l1.dc_nways = 32;

		cpuinfo->l1.ic_nsets = 8;
		cpuinfo->l1.ic_nways = 37;
	} else {
		panic("%s: unsupported Cavium Networks CPU.", __func__);
	}

	/* All Octeon models use 128 byte line size. */
	cpuinfo->l1.dc_linesize = 128;
#endif
#endif

	cpuinfo->l1.ic_size = cpuinfo->l1.ic_linesize
	    * cpuinfo->l1.ic_nsets * cpuinfo->l1.ic_nways;
	cpuinfo->l1.dc_size = cpuinfo->l1.dc_linesize
	    * cpuinfo->l1.dc_nsets * cpuinfo->l1.dc_nways;

	/*
	 * Probe PageMask register to see what sizes of pages are supported
	 * by writing all one's and then reading it back.
	 */
	mips_wr_pagemask(~0);
	cpuinfo->tlb_pgmask = mips_rd_pagemask();
	mips_wr_pagemask(MIPS3_PGMASK_4K);

#ifndef CPU_CNMIPS
	/* L2 cache */
	if (!(cfg1 & MIPS_CONFIG_CM)) {
		/* We don't have valid cfg2 register */
		return;
	}

	cfg2 = mips_rd_config2();

	tmp = (cfg2 >> MIPS_CONFIG2_SL_SHIFT) & MIPS_CONFIG2_SL_MASK;
	if (0 < tmp && tmp <= 7)
		cpuinfo->l2.dc_linesize = 2 << tmp;

	tmp = (cfg2 >> MIPS_CONFIG2_SS_SHIFT) & MIPS_CONFIG2_SS_MASK;
	if (0 <= tmp && tmp <= 7)
		cpuinfo->l2.dc_nsets = 64 << tmp;

	tmp = (cfg2 >> MIPS_CONFIG2_SA_SHIFT) & MIPS_CONFIG2_SA_MASK;
	if (0 <= tmp && tmp <= 7)
		cpuinfo->l2.dc_nways = tmp + 1;

	cpuinfo->l2.dc_size = cpuinfo->l2.dc_linesize
	    * cpuinfo->l2.dc_nsets * cpuinfo->l2.dc_nways;
#endif
}

+void
+mips_hwrena_init(void)
+{
+	uint32_t reg;
+
+	reg = mips_rd_hwrena();
+
+	if (cpuinfo.userlocal_reg)
+		reg |= MIPS_HWRENA_UL;
+
+	mips_wr_hwrena(reg);
+}
+
void
mips_cpu_init(void)
{
	platform_cpu_init();
	mips_get_identity(&cpuinfo);
+	mips_hwrena_init();
	num_tlbentries = cpuinfo.tlb_nentries;
	mips_wr_wired(0);
	tlb_invalidate_all();
	mips_wr_wired(VMWIRED_ENTRIES);
	mips_config_cache(&cpuinfo);
	mips_vector_init();
	mips_icache_sync_all();
	mips_dcache_wbinv_all();
}

void
cpu_identify(void)
{
	uint32_t cfg0, cfg1, cfg2, cfg3;
#if defined(CPU_MIPS1004K) || defined (CPU_MIPS74K) || defined (CPU_MIPS24K)
	uint32_t cfg7;
#endif

	printf("CPU: ");
	switch (cpuinfo.cpu_vendor) {
	case MIPS_PRID_CID_MTI:
		printf("MIPS Technologies");
		break;
	case MIPS_PRID_CID_BROADCOM:
	case MIPS_PRID_CID_SIBYTE:
		printf("Broadcom");
		break;
	case MIPS_PRID_CID_ALCHEMY:
		printf("AMD");
		break;
	case MIPS_PRID_CID_SANDCRAFT:
		printf("Sandcraft");
		break;
	case MIPS_PRID_CID_PHILIPS:
		printf("Philips");
		break;
	case MIPS_PRID_CID_TOSHIBA:
		printf("Toshiba");
		break;
	case MIPS_PRID_CID_LSI:
		printf("LSI");
		break;
	case MIPS_PRID_CID_LEXRA:
		printf("Lexra");
		break;
	case MIPS_PRID_CID_RMI:
		printf("RMI");
		break;
	case MIPS_PRID_CID_CAVIUM:
		printf("Cavium");
		break;
	case MIPS_PRID_CID_INGENIC:
	case MIPS_PRID_CID_INGENIC2:
		printf("Ingenic XBurst");
		break;
	case MIPS_PRID_CID_PREHISTORIC:
	default:
		printf("Unknown cid %#x", cpuinfo.cpu_vendor);
		break;
	}
	if (cpu_model[0] != '\0')
		printf(" (%s)", cpu_model);
	printf(" processor v%d.%d\n", cpuinfo.cpu_rev, cpuinfo.cpu_impl);

	printf(" MMU: ");
	if (cpuinfo.tlb_type == MIPS_MMU_NONE) {
		printf("none present\n");
	} else {
		if (cpuinfo.tlb_type == MIPS_MMU_TLB) {
			printf("Standard TLB");
		} else if (cpuinfo.tlb_type == MIPS_MMU_BAT) {
			printf("Standard BAT");
		} else if (cpuinfo.tlb_type == MIPS_MMU_FIXED) {
			printf("Fixed mapping");
		}
		printf(", %d entries ", cpuinfo.tlb_nentries);
	}

	if (cpuinfo.tlb_pgmask) {
		printf("(");
		if (cpuinfo.tlb_pgmask & MIPS3_PGMASK_MASKX)
			printf("1K ");
		printf("4K ");
		if (cpuinfo.tlb_pgmask & MIPS3_PGMASK_16K)
			printf("16K ");
		if (cpuinfo.tlb_pgmask & MIPS3_PGMASK_64K)
			printf("64K ");
		if (cpuinfo.tlb_pgmask & MIPS3_PGMASK_256K)
			printf("256K ");
		if (cpuinfo.tlb_pgmask & MIPS3_PGMASK_1M)
			printf("1M ");
		if (cpuinfo.tlb_pgmask & MIPS3_PGMASK_16M)
			printf("16M ");
		if (cpuinfo.tlb_pgmask & MIPS3_PGMASK_64M)
			printf("64M ");
		if (cpuinfo.tlb_pgmask & MIPS3_PGMASK_256M)
			printf("256M ");
		printf("pg sizes)");
	}
	printf("\n");

	printf(" L1 i-cache: ");
	if (cpuinfo.l1.ic_linesize == 0) {
		printf("disabled");
	} else {
		if (cpuinfo.l1.ic_nways == 1) {
			printf("direct-mapped with");
		} else {
			printf ("%d ways of", cpuinfo.l1.ic_nways);
		}
		printf(" %d sets, %d bytes per line\n",
		    cpuinfo.l1.ic_nsets, cpuinfo.l1.ic_linesize);
	}

	printf(" L1 d-cache: ");
	if (cpuinfo.l1.dc_linesize == 0) {
		printf("disabled");
	} else {
		if
(cpuinfo.l1.dc_nways == 1) { printf("direct-mapped with"); } else { printf ("%d ways of", cpuinfo.l1.dc_nways); } printf(" %d sets, %d bytes per line\n", cpuinfo.l1.dc_nsets, cpuinfo.l1.dc_linesize); } printf(" L2 cache: "); if (cpuinfo.l2.dc_linesize == 0) { printf("disabled\n"); } else { printf("%d ways of %d sets, %d bytes per line, " "%d KiB total size\n", cpuinfo.l2.dc_nways, cpuinfo.l2.dc_nsets, cpuinfo.l2.dc_linesize, cpuinfo.l2.dc_size / 1024); } cfg0 = mips_rd_config(); /* If config register selection 1 does not exist, exit. */ if (!(cfg0 & MIPS_CONFIG_CM)) return; cfg1 = mips_rd_config1(); printf(" Config1=0x%b\n", cfg1, "\20\7COP2\6MDMX\5PerfCount\4WatchRegs\3MIPS16\2EJTAG\1FPU"); if (cpuinfo.fpu_id != 0) printf(" FPU ID=0x%b\n", cpuinfo.fpu_id, "\020" "\020S" "\021D" "\022PS" "\0233D" "\024W" "\025L" "\026F64" "\0272008" "\034UFRP"); /* If config register selection 2 does not exist, exit. */ if (!(cfg1 & MIPS_CONFIG_CM)) return; cfg2 = mips_rd_config2(); /* * Config2 contains no useful information other then Config3 * existence flag */ printf(" Config2=0x%08x\n", cfg2); /* If config register selection 3 does not exist, exit. */ if (!(cfg2 & MIPS_CONFIG_CM)) return; cfg3 = mips_rd_config3(); /* Print Config3 if it contains any useful info */ if (cfg3 & ~(0x80000000)) printf(" Config3=0x%b\n", cfg3, "\20\16ULRI\2SmartMIPS\1TraceLogic"); #if defined(CPU_MIPS1004K) || defined (CPU_MIPS74K) || defined (CPU_MIPS24K) cfg7 = mips_rd_config7(); printf(" Config7=0x%b\n", cfg7, "\20\40WII\21AR"); #endif } static struct rman cpu_hardirq_rman; static devclass_t cpu_devclass; /* * Device methods */ static int cpu_probe(device_t); static int cpu_attach(device_t); static struct resource *cpu_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int cpu_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *f, driver_intr_t *, void *, void **); static device_method_t cpu_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cpu_probe), DEVMETHOD(device_attach, cpu_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* Bus interface */ DEVMETHOD(bus_alloc_resource, cpu_alloc_resource), DEVMETHOD(bus_setup_intr, cpu_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), { 0, 0 } }; static driver_t cpu_driver = { "cpu", cpu_methods, 1 }; static int cpu_probe(device_t dev) { return (0); } static int cpu_attach(device_t dev) { int error; #ifdef notyet device_t clock; #endif cpu_hardirq_rman.rm_start = 0; cpu_hardirq_rman.rm_end = 5; cpu_hardirq_rman.rm_type = RMAN_ARRAY; cpu_hardirq_rman.rm_descr = "CPU Hard Interrupts"; error = rman_init(&cpu_hardirq_rman); if (error != 0) { device_printf(dev, "failed to initialize irq resources\n"); return (error); } /* XXX rman_manage_all. 
*/ error = rman_manage_region(&cpu_hardirq_rman, cpu_hardirq_rman.rm_start, cpu_hardirq_rman.rm_end); if (error != 0) { device_printf(dev, "failed to manage irq resources\n"); return (error); } if (device_get_unit(dev) != 0) panic("can't attach more cpus"); device_set_desc(dev, "MIPS32 processor"); #ifdef notyet clock = device_add_child(dev, "clock", device_get_unit(dev)); if (clock == NULL) device_printf(dev, "clock failed to attach"); #endif return (bus_generic_attach(dev)); } static struct resource * cpu_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; if (type != SYS_RES_IRQ) return (NULL); res = rman_reserve_resource(&cpu_hardirq_rman, start, end, count, 0, child); return (res); } static int cpu_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { int error; int intr; error = rman_activate_resource(res); if (error != 0) { device_printf(child, "could not activate irq\n"); return (error); } intr = rman_get_start(res); cpu_establish_hardintr(device_get_nameunit(child), filt, handler, arg, intr, flags, cookiep); device_printf(child, "established CPU interrupt %d\n", intr); return (0); } DRIVER_MODULE(cpu, root, cpu_driver, cpu_devclass, 0, 0); diff --git a/sys/mips/mips/mp_machdep.c b/sys/mips/mips/mp_machdep.c index dfc3b59f0533..8a5903b2b514 100644 --- a/sys/mips/mips/mp_machdep.c +++ b/sys/mips/mips/mp_machdep.c @@ -1,373 +1,376 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2009 Neelkanth Natu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include struct pcb stoppcbs[MAXCPU]; static void *dpcpu; static struct mtx ap_boot_mtx; static volatile int aps_ready; static volatile int mp_naps; static void ipi_send(struct pcpu *pc, int ipi) { CTR3(KTR_SMP, "%s: cpu=%d, ipi=%x", __func__, pc->pc_cpuid, ipi); atomic_set_32(&pc->pc_pending_ipis, ipi); platform_ipi_send(pc->pc_cpuid); CTR1(KTR_SMP, "%s: sent", __func__); } void ipi_all_but_self(int ipi) { cpuset_t other_cpus; other_cpus = all_cpus; CPU_CLR(PCPU_GET(cpuid), &other_cpus); ipi_selected(other_cpus, ipi); } /* Send an IPI to a set of cpus. */ void ipi_selected(cpuset_t cpus, int ipi) { struct pcpu *pc; STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { if (CPU_ISSET(pc->pc_cpuid, &cpus)) { CTR3(KTR_SMP, "%s: pc: %p, ipi: %x\n", __func__, pc, ipi); ipi_send(pc, ipi); } } } /* Send an IPI to a specific CPU. */ void ipi_cpu(int cpu, u_int ipi) { CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x\n", __func__, cpu, ipi); ipi_send(cpuid_to_pcpu[cpu], ipi); } /* * Handle an IPI sent to this processor. */ static int mips_ipi_handler(void *arg) { u_int cpu, ipi, ipi_bitmap; int bit; cpu = PCPU_GET(cpuid); platform_ipi_clear(); /* quiesce the pending ipi interrupt */ ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis)); if (ipi_bitmap == 0) return (FILTER_STRAY); CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap); while ((bit = ffs(ipi_bitmap))) { bit = bit - 1; ipi = 1 << bit; ipi_bitmap &= ~ipi; switch (ipi) { case IPI_RENDEZVOUS: CTR0(KTR_SMP, "IPI_RENDEZVOUS"); smp_rendezvous_action(); break; case IPI_AST: CTR0(KTR_SMP, "IPI_AST"); break; case IPI_STOP: /* * IPI_STOP_HARD is mapped to IPI_STOP so it is not * necessary to add it in the switch. 
*/ CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD"); savectx(&stoppcbs[cpu]); tlb_save(); /* Indicate we are stopped */ CPU_SET_ATOMIC(cpu, &stopped_cpus); /* Wait for restart */ while (!CPU_ISSET(cpu, &started_cpus)) cpu_spinwait(); CPU_CLR_ATOMIC(cpu, &started_cpus); CPU_CLR_ATOMIC(cpu, &stopped_cpus); CTR0(KTR_SMP, "IPI_STOP (restart)"); break; case IPI_PREEMPT: CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(curthread); break; case IPI_HARDCLOCK: CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); break; default: panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu); } } return (FILTER_HANDLED); } static int start_ap(int cpuid) { int cpus, ms; cpus = mp_naps; dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO); mips_sync(); if (platform_start_ap(cpuid) != 0) return (-1); /* could not start AP */ for (ms = 0; ms < 5000; ++ms) { if (mp_naps > cpus) return (0); /* success */ else DELAY(1000); } return (-2); /* timeout initializing AP */ } void cpu_mp_setmaxid(void) { cpuset_t cpumask; int cpu, last; platform_cpu_mask(&cpumask); mp_ncpus = 0; last = 1; while ((cpu = CPU_FFS(&cpumask)) != 0) { last = cpu; cpu--; CPU_CLR(cpu, &cpumask); mp_ncpus++; } if (mp_ncpus <= 0) mp_ncpus = 1; mp_maxid = min(last, MAXCPU) - 1; } void cpu_mp_announce(void) { /* NOTHING */ } struct cpu_group * cpu_topo(void) { return (platform_smp_topo()); } int cpu_mp_probe(void) { return (mp_ncpus > 1); } void cpu_mp_start(void) { int error, cpuid; cpuset_t cpumask; mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); CPU_ZERO(&all_cpus); platform_cpu_mask(&cpumask); while (!CPU_EMPTY(&cpumask)) { cpuid = CPU_FFS(&cpumask) - 1; CPU_CLR(cpuid, &cpumask); if (cpuid >= MAXCPU) { printf("cpu_mp_start: ignoring AP #%d.\n", cpuid); continue; } if (cpuid != platform_processor_id()) { if ((error = start_ap(cpuid)) != 0) { printf("AP #%d failed to start: %d\n", cpuid, error); continue; } if (bootverbose) printf("AP #%d started!\n", cpuid); } CPU_SET(cpuid, &all_cpus); } } void smp_init_secondary(u_int32_t cpuid) { + mips_hwrena_init(); + /* TLB */ mips_wr_wired(0); tlb_invalidate_all(); mips_wr_wired(VMWIRED_ENTRIES); /* * We assume that the L1 cache on the APs is identical to the one * on the BSP. */ mips_dcache_wbinv_all(); mips_icache_sync_all(); mips_sync(); mips_wr_entryhi(0); pcpu_init(PCPU_ADDR(cpuid), cpuid, sizeof(struct pcpu)); dpcpu_init(dpcpu, cpuid); /* The AP has initialized successfully - allow the BSP to proceed */ ++mp_naps; /* Spin until the BSP is ready to release the APs */ while (!aps_ready) ; #ifdef PLATFORM_INIT_SECONDARY platform_init_secondary(cpuid); #endif /* Initialize curthread. */ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); PCPU_SET(curthread, PCPU_GET(idlethread)); schedinit_ap(); mtx_lock_spin(&ap_boot_mtx); smp_cpus++; CTR1(KTR_SMP, "SMP: AP CPU #%d launched", PCPU_GET(cpuid)); if (bootverbose) printf("SMP: AP CPU #%d launched.\n", PCPU_GET(cpuid)); if (smp_cpus == mp_ncpus) { atomic_store_rel_int(&smp_started, 1); } mtx_unlock_spin(&ap_boot_mtx); while (smp_started == 0) ; /* nothing */ /* Start per-CPU event timers. 
	 */
	cpu_initclocks_ap();

	/* enter the scheduler */
	sched_throw(NULL);

	panic("scheduler returned us to %s", __func__);
	/* NOTREACHED */
}

static void
release_aps(void *dummy __unused)
{
	int ipi_irq;

	if (mp_ncpus == 1)
		return;

#ifdef PLATFORM_INIT_SECONDARY
	platform_init_secondary(0);
#endif

	/*
	 * IPI handler
	 */
	ipi_irq = platform_ipi_hardintr_num();
	if (ipi_irq != -1) {
		cpu_establish_hardintr("ipi", mips_ipi_handler, NULL, NULL,
		    ipi_irq, INTR_TYPE_MISC | INTR_EXCL, NULL);
	} else {
		ipi_irq = platform_ipi_softintr_num();
		cpu_establish_softintr("ipi", mips_ipi_handler, NULL, NULL,
		    ipi_irq, INTR_TYPE_MISC | INTR_EXCL, NULL);
	}

	atomic_store_rel_int(&aps_ready, 1);

	while (smp_started == 0)
		; /* nothing */
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
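
For context on what the new mips_hwrena_init() enables (a sketch, not part of the patch): HWREna is a per-CPU coprocessor 0 register, which is why smp_init_secondary() now calls mips_hwrena_init() on every AP in addition to the BSP call in mips_cpu_init(). With HWREna.UL set, unprivileged code can read the UserLocal register — the per-thread value that cpu_switch_set_userlocal() reloads on every context switch — with a single RDHWR instruction; with the bit clear, the same instruction takes a reserved instruction exception instead. A minimal userland sketch of that access, assuming an r2-capable toolchain (the function name is illustrative, not from the source):

```c
#include <stdint.h>

/*
 * Illustrative only: read hardware register 29 (UserLocal/ULR).
 * This is the access that MIPS_HWRENA_UL authorizes; the kernel keeps
 * the register pointing at the current thread's TLS area via
 * cpu_switch_set_userlocal().
 */
static inline void *
read_userlocal(void)
{
	void *p;

	__asm__ __volatile__(
	    ".set push\n\t"
	    ".set mips32r2\n\t"
	    "rdhwr %0, $29\n\t"		/* traps if HWREna.UL is clear */
	    ".set pop"
	    : "=r" (p));
	return (p);
}
```

On CPUs whose Config3 register does not advertise ULRI, the diff instead records userlocal_reg as false and patches cpu_switch() (see remove_userlocal_code() in cpu.c), so the kernel never touches the unimplemented register.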
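
Similarly, a quick sanity check of the instruction patching done by remove_userlocal_code() earlier in cpu.c (again a sketch, not part of the change): _ENCODE_INSN() packs the standard MIPS R-type fields, so _JR_RA should come out as 0x03e00008, the canonical `jr ra`, and _NOP as an all-zero word. The field values below (OP_SPECIAL = 0, RA = 31, OP_JR = 0x08) are assumed to match the MIPS opcode headers:

```c
#include <assert.h>
#include <stdint.h>

/* Field values as assumed from the MIPS opcode definitions. */
#define	OP_SPECIAL	0x00	/* major opcode for R-type instructions */
#define	RA		31	/* register $31, the return address */
#define	OP_JR		0x08	/* SPECIAL function code for jr */

#define	_ENCODE_INSN(a, b, c, d, e) \
	((uint32_t)(((a) << 26) | ((b) << 21) | ((c) << 16) | ((d) << 11) | (e)))

int
main(void)
{
	/* jr ra: opcode 0, rs = 31, funct 0x08 -> 0x03e00008. */
	assert(_ENCODE_INSN(OP_SPECIAL, RA, 0, 0, OP_JR) == 0x03e00008);
	/* The following delay slot is filled with a plain nop (all zeroes). */
	assert(_ENCODE_INSN(0, 0, 0, 0, 0) == 0);
	return (0);
}
```

Overwriting the first two words of cpu_switch_set_userlocal() with these values turns the routine into an immediate return, which is the patch's fallback for CPUs without the UserLocal register.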