diff --git a/stand/powerpc/ofw/cas.c b/stand/powerpc/ofw/cas.c index 4f36a147f36d..6292e04794a8 100644 --- a/stand/powerpc/ofw/cas.c +++ b/stand/powerpc/ofw/cas.c @@ -1,259 +1,259 @@ /*- * Copyright (c) 2019 Leandro Lupori * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include /* #define CAS_DEBUG */ #ifdef CAS_DEBUG #define DPRINTF(fmt, ...) printf(fmt, ## __VA_ARGS__) #else #define DPRINTF(fmt, ...) do { ; } while (0) #endif /* PVR */ #define PVR_CPU_P8E 0x004b0000 #define PVR_CPU_P8NVL 0x004c0000 #define PVR_CPU_P8 0x004d0000 #define PVR_CPU_P9 0x004e0000 #define PVR_CPU_MASK 0xffff0000 #define PVR_ISA_207 0x0f000004 #define PVR_ISA_300 0x0f000005 #define PVR_ISA_MASK 0xffffffff /* loader version of kernel's CPU_MAXSIZE */ #define MAX_CPUS ((uint32_t)256u) /* Option Vectors' settings */ /* length of ignored OV */ #define OV_IGN_LEN 0 /* byte 1 (of any OV) */ #define OV_IGN 0x80 /* Option Vector 5 */ /* byte 2 */ #define OV5_LPAR 0x80 #define OV5_SPLPAR 0x40 #define OV5_DRMEM 0x20 #define OV5_LP 0x10 #define OV5_ALPHA_PART 0x08 #define OV5_DMA_DELAY 0x04 #define OV5_DONATE_CPU 0x02 #define OV5_MSI 0x01 /* 9-12: max cpus */ #define OV5_MAX_CPUS(n) ((MAX_CPUS >> (3*8 - (n)*8)) & 0xff) /* 13-14: LoPAPR Level */ #define LOPAPR_LEVEL 0x0101 /* 1.1 */ #define OV5_LOPAPR_LEVEL(n) ((LOPAPR_LEVEL >> (8 - (n)*8)) & 0xff) /* byte 17: Platform Facilities */ #define OV5_RNG 0x80 #define OV5_COMP_ENG 0x40 #define OV5_ENC_ENG 0x20 /* byte 21: Sub-Processors */ #define OV5_NO_SUBPROCS 0 #define OV5_SUBPROCS 1 /* byte 23: interrupt controller */ #define OV5_INTC_XICS 0 /* byte 24: MMU */ #define OV5_MMU_INDEX 24 #define OV5_MMU_HPT 0 #define OV5_MMU_RADIX 0x40 #define OV5_MMU_EITHER 0x80 #define OV5_MMU_DYNAMIC 0xc0 /* byte 25: HPT MMU Extensions */ #define OV5_HPT_EXT_INDEX 25 #define OV5_HPT_GTSE 0x40 /* byte 26: Radix MMU Extensions */ #define OV5_RADIX_EXT_INDEX 26 #define OV5_RADIX_GTSE 0x40 struct pvr { uint32_t mask; uint32_t val; }; struct opt_vec_ignore { char data[2]; } __packed; struct opt_vec4 { char data[3]; } __packed; struct opt_vec5 { char data[27]; } __packed; static struct ibm_arch_vec { struct pvr pvr_list[7]; uint8_t num_opts; struct opt_vec_ignore vec1; struct opt_vec_ignore vec2; struct opt_vec_ignore vec3; struct opt_vec4 vec4; struct opt_vec5 vec5; } 
__packed ibm_arch_vec = {
	/* pvr_list */ {
		{ PVR_CPU_MASK, PVR_CPU_P8 },		/* POWER8 */
		{ PVR_CPU_MASK, PVR_CPU_P8E },		/* POWER8E */
		{ PVR_CPU_MASK, PVR_CPU_P8NVL },	/* POWER8NVL */
		{ PVR_CPU_MASK, PVR_CPU_P9 },		/* POWER9 */
		{ PVR_ISA_MASK, PVR_ISA_207 },		/* All ISA 2.07 */
		{ PVR_ISA_MASK, PVR_ISA_300 },		/* All ISA 3.00 */
		{ 0, 0xffffffffu }			/* terminator */
	},
	4,	/* num_opts (4 actually means 5 option vectors) */

	{ OV_IGN_LEN, OV_IGN },		/* OV1 */
	{ OV_IGN_LEN, OV_IGN },		/* OV2 */
	{ OV_IGN_LEN, OV_IGN },		/* OV3 */

	/* OV4 (can't be ignored) */
	{
		sizeof(struct opt_vec4) - 2,	/* length (n-2) */
		0,
		10 /* Minimum VP entitled capacity percentage * 100
		    * (if absent assume 10%) */
	},

	/* OV5 */
	{
		sizeof(struct opt_vec5) - 2,	/* length (n-2) */
		0,				/* don't ignore */
		OV5_LPAR | OV5_SPLPAR | OV5_LP | OV5_MSI,
		0,
		0,	/* Cooperative Memory Over-commitment */
		0,	/* Associativity Information Option */
		0,	/* Binary Option Controls */
		0,	/* Reserved */
		0,	/* Reserved */
		OV5_MAX_CPUS(0),
		OV5_MAX_CPUS(1),	/* 10 */
		OV5_MAX_CPUS(2),
		OV5_MAX_CPUS(3),
		OV5_LOPAPR_LEVEL(0),
		OV5_LOPAPR_LEVEL(1),
		0,	/* Reserved */
		0,	/* Reserved */
		0,	/* Platform Facilities */
		0,	/* Reserved */
		0,	/* Reserved */
		0,	/* Reserved */	/* 20 */
		OV5_NO_SUBPROCS,
		0,	/* DRMEM_V2 */
		OV5_INTC_XICS,
		OV5_MMU_HPT,
		0,
		0
	}
};

int
ppc64_cas(void)
{
	phandle_t pkg;
	ihandle_t inst;
	cell_t err;
	uint8_t buf[16], idx, val;
	int i, len, rc, radix_mmu;
	const char *var;
	char *ov5;

	pkg = OF_finddevice("/chosen");
	if (pkg == -1) {
		printf("cas: couldn't find /chosen\n");
		return (-1);
	}

	len = OF_getprop(pkg, "ibm,arch-vec-5-platform-support",
	    buf, sizeof(buf));
	if (len == -1)
		/* CAS not supported */
		return (0);

	radix_mmu = 0;
	ov5 = ibm_arch_vec.vec5.data;
	for (i = 0; i < len; i += 2) {
		idx = buf[i];
		val = buf[i + 1];
		DPRINTF("idx 0x%02x val 0x%02x\n", idx, val);
		switch (idx) {
		case OV5_MMU_INDEX:
			/*
			 * Note that testing for OV5_MMU_RADIX/OV5_MMU_EITHER
			 * also covers OV5_MMU_DYNAMIC.
			 */
			if ((val & OV5_MMU_RADIX) || (val & OV5_MMU_EITHER))
				radix_mmu = 1;
			break;
		case OV5_RADIX_EXT_INDEX:
			if (val & OV5_RADIX_GTSE)
				ov5[idx] = OV5_RADIX_GTSE;
			break;
		case OV5_HPT_EXT_INDEX:
		default:
			break;
		}
	}

-	if (radix_mmu && (var = getenv("radix_mmu")) != NULL && var[0] != '0')
-		ov5[OV5_MMU_INDEX] = OV5_MMU_RADIX;
-	else
+	if ((var = getenv("radix_mmu")) != NULL && var[0] == '0')
		radix_mmu = 0;
+	if (radix_mmu)
+		ov5[OV5_MMU_INDEX] = OV5_MMU_RADIX;

	inst = OF_open("/");
	if (inst == -1) {
		printf("cas: failed to open / node\n");
		return (-1);
	}

	DPRINTF("MMU 0x%02x RADIX_EXT 0x%02x\n",
	    ov5[OV5_MMU_INDEX], ov5[OV5_RADIX_EXT_INDEX]);
	rc = OF_call_method("ibm,client-architecture-support",
	    inst, 1, 1, &ibm_arch_vec, &err);
	if (rc != 0 || err) {
		printf("cas: CAS method returned an error: rc %d err %jd\n",
		    rc, (intmax_t)err);
		rc = -1;
	}

	OF_close(inst);
	printf("cas: selected %s MMU\n", radix_mmu ? "radix" : "hash");
	return (rc);
}

diff --git a/sys/powerpc/aim/aim_machdep.c b/sys/powerpc/aim/aim_machdep.c
index 914296440104..c5f84675ebd3 100644
--- a/sys/powerpc/aim/aim_machdep.c
+++ b/sys/powerpc/aim/aim_machdep.c
@@ -1,813 +1,815 @@
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (C) 2001 Benno Rice * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_kstack_pages.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef __powerpc64__ #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __powerpc64__ #include "mmu_oea64.h" #endif #ifndef __powerpc64__ struct bat battable[16]; #endif int radix_mmu = 0; #ifndef __powerpc64__ /* Bits for running on 64-bit systems in 32-bit mode. 
*/ extern void *testppc64, *testppc64size; extern void *restorebridge, *restorebridgesize; extern void *rfid_patch, *rfi_patch1, *rfi_patch2; extern void *trapcode64; extern Elf_Addr _GLOBAL_OFFSET_TABLE_[]; #endif extern void *rstcode, *rstcodeend; extern void *trapcode, *trapcodeend; extern void *hypertrapcode, *hypertrapcodeend; extern void *generictrap, *generictrap64; extern void *alitrap, *aliend; extern void *dsitrap, *dsiend; extern void *decrint, *decrsize; extern void *extint, *extsize; extern void *dblow, *dbend; extern void *imisstrap, *imisssize; extern void *dlmisstrap, *dlmisssize; extern void *dsmisstrap, *dsmisssize; extern void *ap_pcpu; extern void __restartkernel(vm_offset_t, vm_offset_t, vm_offset_t, void *, uint32_t, register_t offset, register_t msr); extern void __restartkernel_virtual(vm_offset_t, vm_offset_t, vm_offset_t, void *, uint32_t, register_t offset, register_t msr); void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp, uint32_t mdp_cookie); void aim_cpu_init(vm_offset_t toc); void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp, uint32_t mdp_cookie) { register_t scratch; /* * If running from an FDT, make sure we are in real mode to avoid * tromping on firmware page tables. Everything in the kernel assumes * 1:1 mappings out of firmware, so this won't break anything not * already broken. This doesn't work if there is live OF, since OF * may internally use non-1:1 mappings. */ if (ofentry == 0) mtmsr(mfmsr() & ~(PSL_IR | PSL_DR)); #ifdef __powerpc64__ /* * Relocate to high memory so that the kernel * can execute from the direct map. * * If we are in virtual mode already, use a special entry point * that sets up a temporary DMAP to execute from until we can * properly set up the MMU. */ if ((vm_offset_t)&aim_early_init < DMAP_BASE_ADDRESS) { if (mfmsr() & PSL_DR) { __restartkernel_virtual(fdt, 0, ofentry, mdp, mdp_cookie, DMAP_BASE_ADDRESS, mfmsr()); } else { __restartkernel(fdt, 0, ofentry, mdp, mdp_cookie, DMAP_BASE_ADDRESS, mfmsr()); } } #endif /* Various very early CPU fix ups */ switch (mfpvr() >> 16) { /* * PowerPC 970 CPUs have a misfeature requested by Apple that * makes them pretend they have a 32-byte cacheline. Turn this * off before we measure the cacheline size. */ case IBM970: case IBM970FX: case IBM970MP: case IBM970GX: scratch = mfspr(SPR_HID5); scratch &= ~HID5_970_DCBZ_SIZE_HI; mtspr(SPR_HID5, scratch); break; #ifdef __powerpc64__ case IBMPOWER7: case IBMPOWER7PLUS: case IBMPOWER8: case IBMPOWER8E: case IBMPOWER8NVL: case IBMPOWER9: /* XXX: get from ibm,slb-size in device tree */ n_slbs = 32; break; #endif } } void aim_cpu_init(vm_offset_t toc) { size_t trap_offset, trapsize; vm_offset_t trap; register_t msr; uint8_t *cache_check; int cacheline_warn; #ifndef __powerpc64__ register_t scratch; int ppc64; #endif trap_offset = 0; cacheline_warn = 0; /* General setup for AIM CPUs */ psl_kernset = PSL_EE | PSL_ME | PSL_IR | PSL_DR | PSL_RI; #ifdef __powerpc64__ psl_kernset |= PSL_SF; if (mfmsr() & PSL_HV) psl_kernset |= PSL_HV; #if BYTE_ORDER == LITTLE_ENDIAN psl_kernset |= PSL_LE; #endif #endif psl_userset = psl_kernset | PSL_PR; #ifdef __powerpc64__ psl_userset32 = psl_userset & ~PSL_SF; #endif /* * Zeroed bits in this variable signify that the value of the bit * in its position is allowed to vary between userspace contexts. * * All other bits are required to be identical for every userspace * context. 
The actual *value* of the bit is determined by * psl_userset and/or psl_userset32, and is not allowed to change. * * Remember to update this set when implementing support for * *conditionally* enabling a processor facility. Failing to do * this will cause swapcontext() in userspace to break when a * process uses a conditionally-enabled facility. * * When *unconditionally* implementing support for a processor * facility, update psl_userset / psl_userset32 instead. * * See the access control check in set_mcontext(). */ psl_userstatic = ~(PSL_VSX | PSL_VEC | PSL_FP | PSL_FE0 | PSL_FE1); /* * Mask bits from the SRR1 that aren't really the MSR: * Bits 1-4, 10-15 (ppc32), 33-36, 42-47 (ppc64) */ psl_userstatic &= ~0x783f0000UL; /* * Initialize the interrupt tables and figure out our cache line * size and whether or not we need the 64-bit bridge code. */ /* * Disable translation in case the vector area hasn't been * mapped (G5). Note that no OFW calls can be made until * translation is re-enabled. */ msr = mfmsr(); mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI); /* * Measure the cacheline size using dcbz * * Use EXC_PGM as a playground. We are about to overwrite it * anyway, we know it exists, and we know it is cache-aligned. */ cache_check = (void *)EXC_PGM; for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++) cache_check[cacheline_size] = 0xff; __asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory"); /* Find the first byte dcbz did not zero to get the cache line size */ for (cacheline_size = 0; cacheline_size < 0x100 && cache_check[cacheline_size] == 0; cacheline_size++); /* Work around psim bug */ if (cacheline_size == 0) { cacheline_warn = 1; cacheline_size = 32; } #ifndef __powerpc64__ /* * Figure out whether we need to use the 64 bit PMAP. This works by * executing an instruction that is only legal on 64-bit PPC (mtmsrd), * and setting ppc64 = 0 if that causes a trap. */ ppc64 = 1; bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size); __syncicache((void *)EXC_PGM, (size_t)&testppc64size); __asm __volatile("\ mfmsr %0; \ mtsprg2 %1; \ \ mtmsrd %0; \ mfsprg2 %1;" : "=r"(scratch), "=r"(ppc64)); if (ppc64) cpu_features |= PPC_FEATURE_64; /* * Now copy restorebridge into all the handlers, if necessary, * and set up the trap tables. */ if (cpu_features & PPC_FEATURE_64) { /* Patch the two instances of rfi -> rfid */ bcopy(&rfid_patch,&rfi_patch1,4); #ifdef KDB /* rfi_patch2 is at the end of dbleave */ bcopy(&rfid_patch,&rfi_patch2,4); #endif } #else /* powerpc64 */ cpu_features |= PPC_FEATURE_64; #endif trapsize = (size_t)&trapcodeend - (size_t)&trapcode; /* * Copy generic handler into every possible trap. Special cases will get * different ones in a minute. */ for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20) bcopy(&trapcode, (void *)trap, trapsize); #ifndef __powerpc64__ if (cpu_features & PPC_FEATURE_64) { /* * Copy a code snippet to restore 32-bit bridge mode * to the top of every non-generic trap handler */ trap_offset += (size_t)&restorebridgesize; bcopy(&restorebridge, (void *)EXC_RST, trap_offset); bcopy(&restorebridge, (void *)EXC_DSI, trap_offset); bcopy(&restorebridge, (void *)EXC_ALI, trap_offset); bcopy(&restorebridge, (void *)EXC_PGM, trap_offset); bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset); bcopy(&restorebridge, (void *)EXC_TRC, trap_offset); bcopy(&restorebridge, (void *)EXC_BPT, trap_offset); } else { /* * Use an IBAT and a DBAT to map the bottom 256M segment. 
* * It is very important to do it *now* to avoid taking a * fault in .text / .data before the MMU is bootstrapped, * because until then, the translation data has not been * copied over from OpenFirmware, so our DSI/ISI will fail * to find a match. */ battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW); battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); __asm (".balign 32; \n" "mtibatu 0,%0; mtibatl 0,%1; isync; \n" "mtdbatu 0,%0; mtdbatl 0,%1; isync" :: "r"(battable[0].batu), "r"(battable[0].batl)); } #else trapsize = (size_t)&hypertrapcodeend - (size_t)&hypertrapcode; bcopy(&hypertrapcode, (void *)(EXC_HEA + trap_offset), trapsize); bcopy(&hypertrapcode, (void *)(EXC_HMI + trap_offset), trapsize); bcopy(&hypertrapcode, (void *)(EXC_HVI + trap_offset), trapsize); bcopy(&hypertrapcode, (void *)(EXC_SOFT_PATCH + trap_offset), trapsize); #endif bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend - (size_t)&rstcode); #ifdef KDB bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend - (size_t)&dblow); bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend - (size_t)&dblow); bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend - (size_t)&dblow); bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend - (size_t)&dblow); #endif bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&aliend - (size_t)&alitrap); bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsiend - (size_t)&dsitrap); /* Set address of generictrap for self-reloc calculations */ *((void **)TRAP_GENTRAP) = &generictrap; #ifdef __powerpc64__ /* Set TOC base so that the interrupt code can get at it */ *((void **)TRAP_ENTRY) = &generictrap; *((register_t *)TRAP_TOCBASE) = toc; #else /* Set branch address for trap code */ if (cpu_features & PPC_FEATURE_64) *((void **)TRAP_ENTRY) = &generictrap64; else *((void **)TRAP_ENTRY) = &generictrap; *((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_; /* G2-specific TLB miss helper handlers */ bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize); bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize); bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize); #endif __syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD); /* * Restore MSR */ mtmsr(msr); /* Warn if cachline size was not determined */ if (cacheline_warn == 1) { printf("WARNING: cacheline size undetermined, setting to 32\n"); } /* * Initialise virtual memory. Use BUS_PROBE_GENERIC priority * in case the platform module had a better idea of what we * should do. */ if (radix_mmu) pmap_mmu_install(MMU_TYPE_RADIX, BUS_PROBE_GENERIC); else if (cpu_features & PPC_FEATURE_64) pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC); else pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC); } /* * Shutdown the CPU as much as possible. */ void cpu_halt(void) { OF_exit(); } int ptrace_single_step(struct thread *td) { struct trapframe *tf; tf = td->td_frame; tf->srr1 |= PSL_SE; return (0); } int ptrace_clear_single_step(struct thread *td) { struct trapframe *tf; tf = td->td_frame; tf->srr1 &= ~PSL_SE; return (0); } void kdb_cpu_clear_singlestep(void) { kdb_frame->srr1 &= ~PSL_SE; } void kdb_cpu_set_singlestep(void) { kdb_frame->srr1 |= PSL_SE; } /* * Initialise a struct pcpu. */ void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz) { #ifdef __powerpc64__ /* Copy the SLB contents from the current CPU */ memcpy(pcpu->pc_aim.slb, PCPU_GET(aim.slb), sizeof(pcpu->pc_aim.slb)); #endif } /* Return 0 on handled success, otherwise signal number. 
 */
int
cpu_machine_check(struct thread *td, struct trapframe *frame, int *ucode)
{
#ifdef __powerpc64__
	/*
	 * This block is 64-bit CPU specific currently.  Punt running in 32-bit
	 * mode on 64-bit CPUs.
	 */
	/* Check if the important information is in DSISR */
	if ((frame->srr1 & SRR1_MCHK_DATA) != 0) {
		printf("Machine check, DSISR: %016lx\n", frame->cpu.aim.dsisr);
		/* SLB multi-hit is recoverable. */
		if ((frame->cpu.aim.dsisr & DSISR_MC_SLB_MULTIHIT) != 0)
			return (0);
		if ((frame->cpu.aim.dsisr &
		    (DSISR_MC_DERAT_MULTIHIT | DSISR_MC_TLB_MULTIHIT)) != 0) {
			pmap_tlbie_all();
			return (0);
		}
		/* TODO: Add other machine check recovery procedures. */
	} else {
		if ((frame->srr1 & SRR1_MCHK_IFETCH_M) == SRR1_MCHK_IFETCH_SLBMH)
			return (0);
	}
#endif
	*ucode = BUS_OBJERR;
	return (SIGBUS);
}

#ifndef __powerpc64__
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
	return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}
#endif

void
pmap_early_io_map_init(void)
{
	if ((cpu_features2 & PPC_FEATURE2_ARCH_3_00) == 0)
		radix_mmu = 0;
-	else
+	else {
+		radix_mmu = 1;
		TUNABLE_INT_FETCH("radix_mmu", &radix_mmu);
+	}

	/*
	 * When using Radix, set the start and end of kva early, to be able to
	 * use KVAs on pmap_early_io_map and avoid issues when remapping them
	 * later.
	 */
	if (radix_mmu) {
		virtual_avail = VM_MIN_KERNEL_ADDRESS;
		virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
	}
}

/*
 * These functions need to provide addresses that both (a) work in real mode
 * (or whatever mode/circumstances the kernel is in in early boot (now)) and
 * (b) can still, in principle, work once the kernel is going. Because these
 * rely on existing mappings/real mode, unmap is a no-op.
 */
vm_offset_t
pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
{
	KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));

	/*
	 * If we have the MMU up in early boot, assume it is 1:1. Otherwise,
	 * try to get the address in a memory region compatible with the
	 * direct map for efficiency later.
	 * Except for Radix MMU, for which current implementation doesn't
	 * support mapping arbitrary virtual addresses, such as the ones
	 * generated by "direct mapping" I/O addresses. In this case, use
	 * addresses from KVA area.
	 */
	if (mfmsr() & PSL_DR)
		return (pa);
	else if (radix_mmu) {
		vm_offset_t va;

		va = virtual_avail;
		virtual_avail += round_page(size + pa - trunc_page(pa));
		return (va);
	} else
		return (DMAP_BASE_ADDRESS + pa);
}

void
pmap_early_io_unmap(vm_offset_t va, vm_size_t size)
{
	KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!"));
}

/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */
void
flush_disable_caches(void)
{
	register_t msr;
	register_t msscr0;
	register_t cache_reg;
	volatile uint32_t *memp;
	uint32_t temp;
	int i;
	int x;

	msr = mfmsr();
	powerpc_sync();
	mtmsr(msr & ~(PSL_EE | PSL_DR));
	msscr0 = mfspr(SPR_MSSCR0);
	msscr0 &= ~MSSCR0_L2PFE;
	mtspr(SPR_MSSCR0, msscr0);
	powerpc_sync();
	isync();
	/* 7e00066c: dssall */
	__asm__ __volatile__(".long 0x7e00066c; sync");
	powerpc_sync();
	isync();
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));
	__asm__ __volatile__("dcbf 0,%0" :: "r"(0));

	/* Lock the L1 Data cache. */
	mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF);
	powerpc_sync();
	isync();

	mtspr(SPR_LDSTCR, 0);

	/*
	 * Perform this in two stages: Flush the cache starting in RAM, then do it
	 * from ROM.
*/ memp = (volatile uint32_t *)0x00000000; for (i = 0; i < 128 * 1024; i++) { temp = *memp; __asm__ __volatile__("dcbf 0,%0" :: "r"(memp)); memp += 32/sizeof(*memp); } memp = (volatile uint32_t *)0xfff00000; x = 0xfe; for (; x != 0xff;) { mtspr(SPR_LDSTCR, x); for (i = 0; i < 128; i++) { temp = *memp; __asm__ __volatile__("dcbf 0,%0" :: "r"(memp)); memp += 32/sizeof(*memp); } x = ((x << 1) | 1) & 0xff; } mtspr(SPR_LDSTCR, 0); cache_reg = mfspr(SPR_L2CR); if (cache_reg & L2CR_L2E) { cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450); mtspr(SPR_L2CR, cache_reg); powerpc_sync(); mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF); while (mfspr(SPR_L2CR) & L2CR_L2HWF) ; /* Busy wait for cache to flush */ powerpc_sync(); cache_reg &= ~L2CR_L2E; mtspr(SPR_L2CR, cache_reg); powerpc_sync(); mtspr(SPR_L2CR, cache_reg | L2CR_L2I); powerpc_sync(); while (mfspr(SPR_L2CR) & L2CR_L2I) ; /* Busy wait for L2 cache invalidate */ powerpc_sync(); } cache_reg = mfspr(SPR_L3CR); if (cache_reg & L3CR_L3E) { cache_reg &= ~(L3CR_L3IO | L3CR_L3DO); mtspr(SPR_L3CR, cache_reg); powerpc_sync(); mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF); while (mfspr(SPR_L3CR) & L3CR_L3HWF) ; /* Busy wait for cache to flush */ powerpc_sync(); cache_reg &= ~L3CR_L3E; mtspr(SPR_L3CR, cache_reg); powerpc_sync(); mtspr(SPR_L3CR, cache_reg | L3CR_L3I); powerpc_sync(); while (mfspr(SPR_L3CR) & L3CR_L3I) ; /* Busy wait for L3 cache invalidate */ powerpc_sync(); } mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE); powerpc_sync(); isync(); mtmsr(msr); } #ifndef __powerpc64__ void mpc745x_sleep() { static u_quad_t timebase = 0; static register_t sprgs[4]; static register_t srrs[2]; jmp_buf resetjb; struct thread *fputd; struct thread *vectd; register_t hid0; register_t msr; register_t saved_msr; ap_pcpu = pcpup; PCPU_SET(restore, &resetjb); saved_msr = mfmsr(); fputd = PCPU_GET(fputhread); vectd = PCPU_GET(vecthread); if (fputd != NULL) save_fpu(fputd); if (vectd != NULL) save_vec(vectd); if (setjmp(resetjb) == 0) { sprgs[0] = mfspr(SPR_SPRG0); sprgs[1] = mfspr(SPR_SPRG1); sprgs[2] = mfspr(SPR_SPRG2); sprgs[3] = mfspr(SPR_SPRG3); srrs[0] = mfspr(SPR_SRR0); srrs[1] = mfspr(SPR_SRR1); timebase = mftb(); powerpc_sync(); flush_disable_caches(); hid0 = mfspr(SPR_HID0); hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP; powerpc_sync(); isync(); msr = mfmsr() | PSL_POW; mtspr(SPR_HID0, hid0); powerpc_sync(); while (1) mtmsr(msr); } /* XXX: The mttb() means this *only* works on single-CPU systems. */ mttb(timebase); PCPU_SET(curthread, curthread); PCPU_SET(curpcb, curthread->td_pcb); pmap_activate(curthread); powerpc_sync(); mtspr(SPR_SPRG0, sprgs[0]); mtspr(SPR_SPRG1, sprgs[1]); mtspr(SPR_SPRG2, sprgs[2]); mtspr(SPR_SPRG3, sprgs[3]); mtspr(SPR_SRR0, srrs[0]); mtspr(SPR_SRR1, srrs[1]); mtmsr(saved_msr); if (fputd == curthread) enable_fpu(curthread); if (vectd == curthread) enable_vec(curthread); powerpc_sync(); } #endif diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c index 5a47b6d8bf31..8aa09811c95b 100644 --- a/sys/powerpc/powerpc/pmap_dispatch.c +++ b/sys/powerpc/powerpc/pmap_dispatch.c @@ -1,257 +1,257 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2005 Peter Grehan * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* * Dispatch MI pmap calls to the appropriate MMU implementation * through a previously registered kernel object. * * Before pmap_bootstrap() can be called, a CPU module must have * called pmap_mmu_install(). This may be called multiple times: * the highest priority call will be installed as the default * MMU handler when pmap_bootstrap() is called. * * It is required that mutex_init() be called before pmap_bootstrap(), * as the PMAP layer makes extensive use of mutexes. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include mmu_t mmu_obj; /* * pmap globals */ struct pmap kernel_pmap_store; vm_offset_t msgbuf_phys; vm_offset_t kernel_vm_end; vm_offset_t virtual_avail; vm_offset_t virtual_end; caddr_t crashdumpmap; int pmap_bootstrapped; /* Default level 0 reservations consist of 512 pages (2MB superpage). */ int vm_level_0_order = 9; SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); -int superpages_enabled = 0; +int superpages_enabled = 1; SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled, CTLFLAG_RDTUN, &superpages_enabled, 0, "Enable support for transparent superpages"); #ifdef AIM int pvo_vaddr_compare(struct pvo_entry *a, struct pvo_entry *b) { if (PVO_VADDR(a) < PVO_VADDR(b)) return (-1); else if (PVO_VADDR(a) > PVO_VADDR(b)) return (1); return (0); } RB_GENERATE(pvo_tree, pvo_entry, pvo_plink, pvo_vaddr_compare); #endif static int pmap_nomethod(void) { return (0); } #define DEFINE_PMAP_IFUNC(ret, func, args) \ DEFINE_IFUNC(, ret, pmap_##func, args) { \ pmap_##func##_t f; \ f = PMAP_RESOLVE_FUNC(func); \ return (f != NULL ? f : (pmap_##func##_t)pmap_nomethod);\ } #define DEFINE_DUMPSYS_IFUNC(ret, func, args) \ DEFINE_IFUNC(, ret, dumpsys_##func, args) { \ pmap_dumpsys_##func##_t f; \ f = PMAP_RESOLVE_FUNC(dumpsys_##func); \ return (f != NULL ? 
f : (pmap_dumpsys_##func##_t)pmap_nomethod);\ } DEFINE_PMAP_IFUNC(void, activate, (struct thread *)); DEFINE_PMAP_IFUNC(void, advise, (pmap_t, vm_offset_t, vm_offset_t, int)); DEFINE_PMAP_IFUNC(void, align_superpage, (vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t)); DEFINE_PMAP_IFUNC(void, clear_modify, (vm_page_t)); DEFINE_PMAP_IFUNC(void, copy, (pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t)); DEFINE_PMAP_IFUNC(int, enter, (pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t)); DEFINE_PMAP_IFUNC(void, enter_quick, (pmap_t, vm_offset_t, vm_page_t, vm_prot_t)); DEFINE_PMAP_IFUNC(void, enter_object, (pmap_t, vm_offset_t, vm_offset_t, vm_page_t, vm_prot_t)); DEFINE_PMAP_IFUNC(vm_paddr_t, extract, (pmap_t, vm_offset_t)); DEFINE_PMAP_IFUNC(vm_page_t, extract_and_hold, (pmap_t, vm_offset_t, vm_prot_t)); DEFINE_PMAP_IFUNC(void, kenter, (vm_offset_t, vm_paddr_t)); DEFINE_PMAP_IFUNC(void, kenter_attr, (vm_offset_t, vm_paddr_t, vm_memattr_t)); DEFINE_PMAP_IFUNC(vm_paddr_t, kextract, (vm_offset_t)); DEFINE_PMAP_IFUNC(void, kremove, (vm_offset_t)); DEFINE_PMAP_IFUNC(void, object_init_pt, (pmap_t, vm_offset_t, vm_object_t, vm_pindex_t, vm_size_t)); DEFINE_PMAP_IFUNC(boolean_t, is_modified, (vm_page_t)); DEFINE_PMAP_IFUNC(boolean_t, is_prefaultable, (pmap_t, vm_offset_t)); DEFINE_PMAP_IFUNC(boolean_t, is_referenced, (vm_page_t)); DEFINE_PMAP_IFUNC(boolean_t, page_exists_quick, (pmap_t, vm_page_t)); DEFINE_PMAP_IFUNC(void, page_init, (vm_page_t)); DEFINE_PMAP_IFUNC(boolean_t, page_is_mapped, (vm_page_t)); DEFINE_PMAP_IFUNC(int, page_wired_mappings, (vm_page_t)); DEFINE_PMAP_IFUNC(void, protect, (pmap_t, vm_offset_t, vm_offset_t, vm_prot_t)); DEFINE_PMAP_IFUNC(bool, ps_enabled, (pmap_t)); DEFINE_PMAP_IFUNC(void, qenter, (vm_offset_t, vm_page_t *, int)); DEFINE_PMAP_IFUNC(void, qremove, (vm_offset_t, int)); DEFINE_PMAP_IFUNC(vm_offset_t, quick_enter_page, (vm_page_t)); DEFINE_PMAP_IFUNC(void, quick_remove_page, (vm_offset_t)); DEFINE_PMAP_IFUNC(boolean_t, ts_referenced, (vm_page_t)); DEFINE_PMAP_IFUNC(void, release, (pmap_t)); DEFINE_PMAP_IFUNC(void, remove, (pmap_t, vm_offset_t, vm_offset_t)); DEFINE_PMAP_IFUNC(void, remove_all, (vm_page_t)); DEFINE_PMAP_IFUNC(void, remove_pages, (pmap_t)); DEFINE_PMAP_IFUNC(void, remove_write, (vm_page_t)); DEFINE_PMAP_IFUNC(void, unwire, (pmap_t, vm_offset_t, vm_offset_t)); DEFINE_PMAP_IFUNC(void, zero_page, (vm_page_t)); DEFINE_PMAP_IFUNC(void, zero_page_area, (vm_page_t, int, int)); DEFINE_PMAP_IFUNC(void, copy_page, (vm_page_t, vm_page_t)); DEFINE_PMAP_IFUNC(void, copy_pages, (vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], vm_offset_t b_offset, int xfersize)); DEFINE_PMAP_IFUNC(void, growkernel, (vm_offset_t)); DEFINE_PMAP_IFUNC(void, init, (void)); DEFINE_PMAP_IFUNC(vm_offset_t, map, (vm_offset_t *, vm_paddr_t, vm_paddr_t, int)); DEFINE_PMAP_IFUNC(int, pinit, (pmap_t)); DEFINE_PMAP_IFUNC(void, pinit0, (pmap_t)); DEFINE_PMAP_IFUNC(int, mincore, (pmap_t, vm_offset_t, vm_paddr_t *)); DEFINE_PMAP_IFUNC(void, deactivate, (struct thread *)); DEFINE_PMAP_IFUNC(void, bootstrap, (vm_offset_t, vm_offset_t)); DEFINE_PMAP_IFUNC(void, cpu_bootstrap, (int)); DEFINE_PMAP_IFUNC(void *, mapdev, (vm_paddr_t, vm_size_t)); DEFINE_PMAP_IFUNC(void *, mapdev_attr, (vm_paddr_t, vm_size_t, vm_memattr_t)); DEFINE_PMAP_IFUNC(void, page_set_memattr, (vm_page_t, vm_memattr_t)); DEFINE_PMAP_IFUNC(void, unmapdev, (vm_offset_t, vm_size_t)); DEFINE_PMAP_IFUNC(int, map_user_ptr, (pmap_t, volatile const void *, void **, size_t, size_t *)); DEFINE_PMAP_IFUNC(int, 
decode_kernel_ptr, (vm_offset_t, int *, vm_offset_t *)); DEFINE_PMAP_IFUNC(boolean_t, dev_direct_mapped, (vm_paddr_t, vm_size_t)); DEFINE_PMAP_IFUNC(void, sync_icache, (pmap_t, vm_offset_t, vm_size_t)); DEFINE_PMAP_IFUNC(int, change_attr, (vm_offset_t, vm_size_t, vm_memattr_t)); DEFINE_PMAP_IFUNC(void, page_array_startup, (long)); DEFINE_PMAP_IFUNC(void, tlbie_all, (void)); DEFINE_DUMPSYS_IFUNC(void, map_chunk, (vm_paddr_t, size_t, void **)); DEFINE_DUMPSYS_IFUNC(void, unmap_chunk, (vm_paddr_t, size_t, void *)); DEFINE_DUMPSYS_IFUNC(void, pa_init, (void)); DEFINE_DUMPSYS_IFUNC(size_t, scan_pmap, (void)); DEFINE_DUMPSYS_IFUNC(void *, dump_pmap_init, (unsigned)); DEFINE_DUMPSYS_IFUNC(void *, dump_pmap, (void *, void *, u_long *)); /* * MMU install routines. Highest priority wins, equal priority also * overrides allowing last-set to win. */ SET_DECLARE(mmu_set, struct mmu_kobj); boolean_t pmap_mmu_install(char *name, int prio) { mmu_t *mmupp, mmup; static int curr_prio = 0; /* * Try and locate the MMU kobj corresponding to the name */ SET_FOREACH(mmupp, mmu_set) { mmup = *mmupp; if (mmup->name && !strcmp(mmup->name, name) && (prio >= curr_prio || mmu_obj == NULL)) { curr_prio = prio; mmu_obj = mmup; return (TRUE); } } return (FALSE); } /* MMU "pre-bootstrap" init, used to install extra resolvers, etc. */ void pmap_mmu_init() { if (mmu_obj->funcs->install != NULL) (mmu_obj->funcs->install)(); } const char * pmap_mmu_name(void) { return (mmu_obj->name); } int unmapped_buf_allowed; boolean_t pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode) { switch (mode) { case VM_MEMATTR_DEFAULT: case VM_MEMATTR_UNCACHEABLE: case VM_MEMATTR_CACHEABLE: case VM_MEMATTR_WRITE_COMBINING: case VM_MEMATTR_WRITE_BACK: case VM_MEMATTR_WRITE_THROUGH: case VM_MEMATTR_PREFETCHABLE: return (TRUE); default: return (FALSE); } }
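Taken together, the three hunks implement one policy: the loader's CAS call tells firmware which MMU the client will use, aim_machdep.c picks the matching pmap implementation (radix becomes the default on ISA 3.00 CPUs, overridable by the "radix_mmu" tunable), and pmap_dispatch.c turns superpages on by default. The standalone sketch below only illustrates that policy; it is not kernel code, and choose_radix() and its three inputs are hypothetical stand-ins for cpu_features2 & PPC_FEATURE2_ARCH_3_00, the firmware-reported ibm,arch-vec-5-platform-support bytes, and the "radix_mmu" tunable.

/*
 * Illustrative, self-contained sketch of the radix-vs-hash selection this
 * patch implements.  The real logic lives in ppc64_cas() (loader) and
 * pmap_early_io_map_init() (kernel).
 */
#include <stdio.h>

static int
choose_radix(int arch_300, int fw_supports_radix, const char *tunable)
{
	int radix = 0;

	/* Radix requires ISA 3.00 (POWER9) and firmware that offers it. */
	if (arch_300 && fw_supports_radix)
		radix = 1;		/* new default: radix on */

	/* The "radix_mmu" tunable can veto it. */
	if (tunable != NULL && tunable[0] == '0')
		radix = 0;

	return (radix);
}

int
main(void)
{
	printf("P9 + fw support, no tunable  -> %s\n",
	    choose_radix(1, 1, NULL) ? "radix" : "hash");
	printf("P9 + fw support, radix_mmu=0 -> %s\n",
	    choose_radix(1, 1, "0") ? "radix" : "hash");
	printf("P8 (ISA 2.07)                -> %s\n",
	    choose_radix(0, 0, NULL) ? "radix" : "hash");
	return (0);
}

In practice the knob is the tunable: setting radix_mmu="0" in loader.conf keeps the hash (HPT) MMU both in the loader's CAS negotiation and in the kernel's pmap_mmu_install() choice; leaving it unset now selects radix on CPUs and firmware that support it.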