diff --git a/sys/powerpc/aim/aim_machdep.c b/sys/powerpc/aim/aim_machdep.c index 0ce7cf03403f..b09089069c28 100644 --- a/sys/powerpc/aim/aim_machdep.c +++ b/sys/powerpc/aim/aim_machdep.c @@ -1,787 +1,789 @@ /*- * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (C) 2001 Benno Rice * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $ */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_kstack_pages.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef __powerpc64__ #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __powerpc64__ #include "mmu_oea64.h" #endif #ifndef __powerpc64__ struct bat battable[16]; #endif int radix_mmu = 0; #ifndef __powerpc64__ /* Bits for running on 64-bit systems in 32-bit mode. */ extern void *testppc64, *testppc64size; extern void *restorebridge, *restorebridgesize; extern void *rfid_patch, *rfi_patch1, *rfi_patch2; extern void *trapcode64; extern Elf_Addr _GLOBAL_OFFSET_TABLE_[]; #endif extern void *rstcode, *rstcodeend; extern void *trapcode, *trapcodeend; extern void *hypertrapcode, *hypertrapcodeend; extern void *generictrap, *generictrap64; extern void *alitrap, *aliend; extern void *dsitrap, *dsiend; extern void *decrint, *decrsize; extern void *extint, *extsize; extern void *dblow, *dbend; extern void *imisstrap, *imisssize; extern void *dlmisstrap, *dlmisssize; extern void *dsmisstrap, *dsmisssize; extern void *ap_pcpu; extern void __restartkernel(vm_offset_t, vm_offset_t, vm_offset_t, void *, uint32_t, register_t offset, register_t msr); extern void __restartkernel_virtual(vm_offset_t, vm_offset_t, vm_offset_t, void *, uint32_t, register_t offset, register_t msr); void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp, uint32_t mdp_cookie); void aim_cpu_init(vm_offset_t toc); void aim_early_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp, uint32_t mdp_cookie) { register_t scratch; /* * If running from an FDT, make sure we are in real mode to avoid * tromping on firmware page tables. Everything in the kernel assumes * 1:1 mappings out of firmware, so this won't break anything not * already broken. This doesn't work if there is live OF, since OF * may internally use non-1:1 mappings. */ if (ofentry == 0) mtmsr(mfmsr() & ~(PSL_IR | PSL_DR)); #ifdef __powerpc64__ /* * Relocate to high memory so that the kernel * can execute from the direct map. * * If we are in virtual mode already, use a special entry point * that sets up a temporary DMAP to execute from until we can * properly set up the MMU. */ if ((vm_offset_t)&aim_early_init < DMAP_BASE_ADDRESS) { if (mfmsr() & PSL_DR) { __restartkernel_virtual(fdt, 0, ofentry, mdp, mdp_cookie, DMAP_BASE_ADDRESS, mfmsr()); } else { __restartkernel(fdt, 0, ofentry, mdp, mdp_cookie, DMAP_BASE_ADDRESS, mfmsr()); } } #endif /* Various very early CPU fix ups */ switch (mfpvr() >> 16) { /* * PowerPC 970 CPUs have a misfeature requested by Apple that * makes them pretend they have a 32-byte cacheline. Turn this * off before we measure the cacheline size. 
*/ case IBM970: case IBM970FX: case IBM970MP: case IBM970GX: scratch = mfspr(SPR_HID5); scratch &= ~HID5_970_DCBZ_SIZE_HI; mtspr(SPR_HID5, scratch); break; #ifdef __powerpc64__ case IBMPOWER7: case IBMPOWER7PLUS: case IBMPOWER8: case IBMPOWER8E: case IBMPOWER8NVL: case IBMPOWER9: /* XXX: get from ibm,slb-size in device tree */ n_slbs = 32; break; #endif } } void aim_cpu_init(vm_offset_t toc) { size_t trap_offset, trapsize; vm_offset_t trap; register_t msr; uint8_t *cache_check; int cacheline_warn; #ifndef __powerpc64__ register_t scratch; int ppc64; #endif trap_offset = 0; cacheline_warn = 0; /* General setup for AIM CPUs */ psl_kernset = PSL_EE | PSL_ME | PSL_IR | PSL_DR | PSL_RI; #ifdef __powerpc64__ psl_kernset |= PSL_SF; if (mfmsr() & PSL_HV) psl_kernset |= PSL_HV; #if BYTE_ORDER == LITTLE_ENDIAN psl_kernset |= PSL_LE; #endif #endif psl_userset = psl_kernset | PSL_PR; #ifdef __powerpc64__ psl_userset32 = psl_userset & ~PSL_SF; #endif /* * Zeroed bits in this variable signify that the value of the bit * in its position is allowed to vary between userspace contexts. * * All other bits are required to be identical for every userspace * context. The actual *value* of the bit is determined by * psl_userset and/or psl_userset32, and is not allowed to change. * * Remember to update this set when implementing support for * *conditionally* enabling a processor facility. Failing to do * this will cause swapcontext() in userspace to break when a * process uses a conditionally-enabled facility. * * When *unconditionally* implementing support for a processor * facility, update psl_userset / psl_userset32 instead. * * See the access control check in set_mcontext(). */ psl_userstatic = ~(PSL_VSX | PSL_VEC | PSL_FP | PSL_FE0 | PSL_FE1); /* * Mask bits from the SRR1 that aren't really the MSR: * Bits 1-4, 10-15 (ppc32), 33-36, 42-47 (ppc64) */ psl_userstatic &= ~0x783f0000UL; /* * Initialize the interrupt tables and figure out our cache line * size and whether or not we need the 64-bit bridge code. */ /* * Disable translation in case the vector area hasn't been * mapped (G5). Note that no OFW calls can be made until * translation is re-enabled. */ msr = mfmsr(); mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI); /* * Measure the cacheline size using dcbz * * Use EXC_PGM as a playground. We are about to overwrite it * anyway, we know it exists, and we know it is cache-aligned. */ cache_check = (void *)EXC_PGM; for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++) cache_check[cacheline_size] = 0xff; __asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory"); /* Find the first byte dcbz did not zero to get the cache line size */ for (cacheline_size = 0; cacheline_size < 0x100 && cache_check[cacheline_size] == 0; cacheline_size++); /* Work around psim bug */ if (cacheline_size == 0) { cacheline_warn = 1; cacheline_size = 32; } #ifndef __powerpc64__ /* * Figure out whether we need to use the 64 bit PMAP. This works by * executing an instruction that is only legal on 64-bit PPC (mtmsrd), * and setting ppc64 = 0 if that causes a trap. */ ppc64 = 1; bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size); __syncicache((void *)EXC_PGM, (size_t)&testppc64size); __asm __volatile("\ mfmsr %0; \ mtsprg2 %1; \ \ mtmsrd %0; \ mfsprg2 %1;" : "=r"(scratch), "=r"(ppc64)); if (ppc64) cpu_features |= PPC_FEATURE_64; /* * Now copy restorebridge into all the handlers, if necessary, * and set up the trap tables. 
*/ if (cpu_features & PPC_FEATURE_64) { /* Patch the two instances of rfi -> rfid */ bcopy(&rfid_patch,&rfi_patch1,4); #ifdef KDB /* rfi_patch2 is at the end of dbleave */ bcopy(&rfid_patch,&rfi_patch2,4); #endif } #else /* powerpc64 */ cpu_features |= PPC_FEATURE_64; #endif trapsize = (size_t)&trapcodeend - (size_t)&trapcode; /* * Copy generic handler into every possible trap. Special cases will get * different ones in a minute. */ for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20) bcopy(&trapcode, (void *)trap, trapsize); #ifndef __powerpc64__ if (cpu_features & PPC_FEATURE_64) { /* * Copy a code snippet to restore 32-bit bridge mode * to the top of every non-generic trap handler */ trap_offset += (size_t)&restorebridgesize; bcopy(&restorebridge, (void *)EXC_RST, trap_offset); bcopy(&restorebridge, (void *)EXC_DSI, trap_offset); bcopy(&restorebridge, (void *)EXC_ALI, trap_offset); bcopy(&restorebridge, (void *)EXC_PGM, trap_offset); bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset); bcopy(&restorebridge, (void *)EXC_TRC, trap_offset); bcopy(&restorebridge, (void *)EXC_BPT, trap_offset); } else { /* * Use an IBAT and a DBAT to map the bottom 256M segment. * * It is very important to do it *now* to avoid taking a * fault in .text / .data before the MMU is bootstrapped, * because until then, the translation data has not been * copied over from OpenFirmware, so our DSI/ISI will fail * to find a match. */ battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW); battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); __asm (".balign 32; \n" "mtibatu 0,%0; mtibatl 0,%1; isync; \n" "mtdbatu 0,%0; mtdbatl 0,%1; isync" :: "r"(battable[0].batu), "r"(battable[0].batl)); } #else trapsize = (size_t)&hypertrapcodeend - (size_t)&hypertrapcode; bcopy(&hypertrapcode, (void *)(EXC_HEA + trap_offset), trapsize); bcopy(&hypertrapcode, (void *)(EXC_HMI + trap_offset), trapsize); bcopy(&hypertrapcode, (void *)(EXC_HVI + trap_offset), trapsize); bcopy(&hypertrapcode, (void *)(EXC_SOFT_PATCH + trap_offset), trapsize); #endif bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend - (size_t)&rstcode); #ifdef KDB bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend - (size_t)&dblow); bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend - (size_t)&dblow); bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend - (size_t)&dblow); bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend - (size_t)&dblow); #endif bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&aliend - (size_t)&alitrap); bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsiend - (size_t)&dsitrap); /* Set address of generictrap for self-reloc calculations */ *((void **)TRAP_GENTRAP) = &generictrap; #ifdef __powerpc64__ /* Set TOC base so that the interrupt code can get at it */ *((void **)TRAP_ENTRY) = &generictrap; *((register_t *)TRAP_TOCBASE) = toc; #else /* Set branch address for trap code */ if (cpu_features & PPC_FEATURE_64) *((void **)TRAP_ENTRY) = &generictrap64; else *((void **)TRAP_ENTRY) = &generictrap; *((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_; /* G2-specific TLB miss helper handlers */ bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize); bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize); bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize); #endif __syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD); /* * Restore MSR */ mtmsr(msr); /* Warn if cacheline size was not determined */ if (cacheline_warn == 1) { printf("WARNING: cacheline size undetermined, setting to 32\n"); }
/* * Initialise virtual memory. Use BUS_PROBE_GENERIC priority * in case the platform module had a better idea of what we * should do. */ if (cpu_features2 & PPC_FEATURE2_ARCH_3_00) { radix_mmu = 0; TUNABLE_INT_FETCH("radix_mmu", &radix_mmu); if (radix_mmu) pmap_mmu_install(MMU_TYPE_RADIX, BUS_PROBE_GENERIC); else pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC); } else if (cpu_features & PPC_FEATURE_64) pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC); else pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC); } /* * Shut down the CPU as much as possible. */ void cpu_halt(void) { OF_exit(); } int ptrace_single_step(struct thread *td) { struct trapframe *tf; tf = td->td_frame; tf->srr1 |= PSL_SE; return (0); } int ptrace_clear_single_step(struct thread *td) { struct trapframe *tf; tf = td->td_frame; tf->srr1 &= ~PSL_SE; return (0); } void kdb_cpu_clear_singlestep(void) { kdb_frame->srr1 &= ~PSL_SE; } void kdb_cpu_set_singlestep(void) { kdb_frame->srr1 |= PSL_SE; } /* * Initialise a struct pcpu. */ void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz) { #ifdef __powerpc64__ /* Copy the SLB contents from the current CPU */ memcpy(pcpu->pc_aim.slb, PCPU_GET(aim.slb), sizeof(pcpu->pc_aim.slb)); #endif } /* Return 0 on handled success, otherwise signal number. */ int cpu_machine_check(struct thread *td, struct trapframe *frame, int *ucode) { #ifdef __powerpc64__ /* * This block is 64-bit CPU specific currently. Punt running in 32-bit * mode on 64-bit CPUs. */ /* Check if the important information is in DSISR */ if ((frame->srr1 & SRR1_MCHK_DATA) != 0) { printf("Machine check, DSISR: %016lx\n", frame->cpu.aim.dsisr); /* SLB multi-hit is recoverable. */ if ((frame->cpu.aim.dsisr & DSISR_MC_SLB_MULTIHIT) != 0) return (0); if ((frame->cpu.aim.dsisr & (DSISR_MC_DERAT_MULTIHIT | DSISR_MC_TLB_MULTIHIT)) != 0) { pmap_tlbie_all(); return (0); } /* TODO: Add other machine check recovery procedures. */ } else { if ((frame->srr1 & SRR1_MCHK_IFETCH_M) == SRR1_MCHK_IFETCH_SLBMH) return (0); } #endif *ucode = BUS_OBJERR; return (SIGBUS); } #ifndef __powerpc64__ uint64_t va_to_vsid(pmap_t pm, vm_offset_t va) { return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK); } #endif /* * These functions need to provide addresses that both (a) work in real mode * (or whatever mode/circumstances the kernel is in during early boot (now)) and * (b) can still, in principle, work once the kernel is going. Because these * rely on existing mappings/real mode, unmap is a no-op. */ vm_offset_t pmap_early_io_map(vm_paddr_t pa, vm_size_t size) { KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!")); /* * If we have the MMU up in early boot, assume it is 1:1. Otherwise, * try to get the address in a memory region compatible with the * direct map for efficiency later.
*/ if (mfmsr() & PSL_DR) return (pa); else return (DMAP_BASE_ADDRESS + pa); } void pmap_early_io_unmap(vm_offset_t va, vm_size_t size) { KASSERT(!pmap_bootstrapped, ("Not available after PMAP started!")); } /* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */ void flush_disable_caches(void) { register_t msr; register_t msscr0; register_t cache_reg; volatile uint32_t *memp; uint32_t temp; int i; int x; msr = mfmsr(); powerpc_sync(); mtmsr(msr & ~(PSL_EE | PSL_DR)); msscr0 = mfspr(SPR_MSSCR0); msscr0 &= ~MSSCR0_L2PFE; mtspr(SPR_MSSCR0, msscr0); powerpc_sync(); isync(); /* 7e00066c: dssall */ __asm__ __volatile__(".long 0x7e00066c; sync"); powerpc_sync(); isync(); __asm__ __volatile__("dcbf 0,%0" :: "r"(0)); __asm__ __volatile__("dcbf 0,%0" :: "r"(0)); __asm__ __volatile__("dcbf 0,%0" :: "r"(0)); /* Lock the L1 Data cache. */ mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF); powerpc_sync(); isync(); mtspr(SPR_LDSTCR, 0); /* * Perform this in two stages: Flush the cache starting in RAM, then do it * from ROM. */ memp = (volatile uint32_t *)0x00000000; for (i = 0; i < 128 * 1024; i++) { temp = *memp; __asm__ __volatile__("dcbf 0,%0" :: "r"(memp)); memp += 32/sizeof(*memp); } memp = (volatile uint32_t *)0xfff00000; x = 0xfe; for (; x != 0xff;) { mtspr(SPR_LDSTCR, x); for (i = 0; i < 128; i++) { temp = *memp; __asm__ __volatile__("dcbf 0,%0" :: "r"(memp)); memp += 32/sizeof(*memp); } x = ((x << 1) | 1) & 0xff; } mtspr(SPR_LDSTCR, 0); cache_reg = mfspr(SPR_L2CR); if (cache_reg & L2CR_L2E) { cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450); mtspr(SPR_L2CR, cache_reg); powerpc_sync(); mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF); while (mfspr(SPR_L2CR) & L2CR_L2HWF) ; /* Busy wait for cache to flush */ powerpc_sync(); cache_reg &= ~L2CR_L2E; mtspr(SPR_L2CR, cache_reg); powerpc_sync(); mtspr(SPR_L2CR, cache_reg | L2CR_L2I); powerpc_sync(); while (mfspr(SPR_L2CR) & L2CR_L2I) ; /* Busy wait for L2 cache invalidate */ powerpc_sync(); } cache_reg = mfspr(SPR_L3CR); if (cache_reg & L3CR_L3E) { cache_reg &= ~(L3CR_L3IO | L3CR_L3DO); mtspr(SPR_L3CR, cache_reg); powerpc_sync(); mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF); while (mfspr(SPR_L3CR) & L3CR_L3HWF) ; /* Busy wait for cache to flush */ powerpc_sync(); cache_reg &= ~L3CR_L3E; mtspr(SPR_L3CR, cache_reg); powerpc_sync(); mtspr(SPR_L3CR, cache_reg | L3CR_L3I); powerpc_sync(); while (mfspr(SPR_L3CR) & L3CR_L3I) ; /* Busy wait for L3 cache invalidate */ powerpc_sync(); } mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE); powerpc_sync(); isync(); mtmsr(msr); } +#ifndef __powerpc64__ void -cpu_sleep() +mpc745x_sleep() { static u_quad_t timebase = 0; static register_t sprgs[4]; static register_t srrs[2]; jmp_buf resetjb; struct thread *fputd; struct thread *vectd; register_t hid0; register_t msr; register_t saved_msr; ap_pcpu = pcpup; PCPU_SET(restore, &resetjb); saved_msr = mfmsr(); fputd = PCPU_GET(fputhread); vectd = PCPU_GET(vecthread); if (fputd != NULL) save_fpu(fputd); if (vectd != NULL) save_vec(vectd); if (setjmp(resetjb) == 0) { sprgs[0] = mfspr(SPR_SPRG0); sprgs[1] = mfspr(SPR_SPRG1); sprgs[2] = mfspr(SPR_SPRG2); sprgs[3] = mfspr(SPR_SPRG3); srrs[0] = mfspr(SPR_SRR0); srrs[1] = mfspr(SPR_SRR1); timebase = mftb(); powerpc_sync(); flush_disable_caches(); hid0 = mfspr(SPR_HID0); hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP; powerpc_sync(); isync(); msr = mfmsr() | PSL_POW; mtspr(SPR_HID0, hid0); powerpc_sync(); while (1) mtmsr(msr); } platform_smp_timebase_sync(timebase, 0); PCPU_SET(curthread, curthread); PCPU_SET(curpcb, 
curthread->td_pcb); pmap_activate(curthread); powerpc_sync(); mtspr(SPR_SPRG0, sprgs[0]); mtspr(SPR_SPRG1, sprgs[1]); mtspr(SPR_SPRG2, sprgs[2]); mtspr(SPR_SPRG3, sprgs[3]); mtspr(SPR_SRR0, srrs[0]); mtspr(SPR_SRR1, srrs[1]); mtmsr(saved_msr); if (fputd == curthread) enable_fpu(curthread); if (vectd == curthread) enable_vec(curthread); powerpc_sync(); } +#endif diff --git a/sys/powerpc/include/cpu.h b/sys/powerpc/include/cpu.h index 9ae18e13900a..256e6d3eabf0 100644 --- a/sys/powerpc/include/cpu.h +++ b/sys/powerpc/include/cpu.h @@ -1,153 +1,157 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (C) 1995-1997 Wolfgang Solfrank. * Copyright (C) 1995-1997 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $NetBSD: cpu.h,v 1.11 2000/05/26 21:19:53 thorpej Exp $ * $FreeBSD$ */ #ifndef _MACHINE_CPU_H_ #define _MACHINE_CPU_H_ #include #include #include /* * CPU Feature Attributes * * These are defined in the PowerPC ELF ABI for the AT_HWCAP vector, * and are exported to userland via the machdep.cpu_features * sysctl. 
*/ extern u_long cpu_features; extern u_long cpu_features2; #define PPC_FEATURE_32 0x80000000 /* Always true */ #define PPC_FEATURE_64 0x40000000 /* Defined on a 64-bit CPU */ #define PPC_FEATURE_601_INSTR 0x20000000 /* Defined on a PowerPC 601 */ #define PPC_FEATURE_HAS_ALTIVEC 0x10000000 #define PPC_FEATURE_HAS_FPU 0x08000000 #define PPC_FEATURE_HAS_MMU 0x04000000 #define PPC_FEATURE_UNIFIED_CACHE 0x01000000 #define PPC_FEATURE_HAS_SPE 0x00800000 #define PPC_FEATURE_HAS_EFP_SINGLE 0x00400000 #define PPC_FEATURE_HAS_EFP_DOUBLE 0x00200000 #define PPC_FEATURE_NO_TB 0x00100000 #define PPC_FEATURE_POWER4 0x00080000 #define PPC_FEATURE_POWER5 0x00040000 #define PPC_FEATURE_POWER5_PLUS 0x00020000 #define PPC_FEATURE_CELL 0x00010000 #define PPC_FEATURE_BOOKE 0x00008000 #define PPC_FEATURE_SMT 0x00004000 #define PPC_FEATURE_ICACHE_SNOOP 0x00002000 #define PPC_FEATURE_ARCH_2_05 0x00001000 #define PPC_FEATURE_HAS_DFP 0x00000400 #define PPC_FEATURE_POWER6_EXT 0x00000200 #define PPC_FEATURE_ARCH_2_06 0x00000100 #define PPC_FEATURE_HAS_VSX 0x00000080 #define PPC_FEATURE_TRUE_LE 0x00000002 #define PPC_FEATURE_PPC_LE 0x00000001 #define PPC_FEATURE2_ARCH_2_07 0x80000000 #define PPC_FEATURE2_HTM 0x40000000 #define PPC_FEATURE2_DSCR 0x20000000 #define PPC_FEATURE2_EBB 0x10000000 #define PPC_FEATURE2_ISEL 0x08000000 #define PPC_FEATURE2_TAR 0x04000000 #define PPC_FEATURE2_HAS_VEC_CRYPTO 0x02000000 #define PPC_FEATURE2_HTM_NOSC 0x01000000 #define PPC_FEATURE2_ARCH_3_00 0x00800000 #define PPC_FEATURE2_HAS_IEEE128 0x00400000 #define PPC_FEATURE2_DARN 0x00200000 #define PPC_FEATURE2_SCV 0x00100000 #define PPC_FEATURE2_HTM_NOSUSPEND 0x00080000 #define PPC_FEATURE_BITMASK \ "\20" \ "\040PPC32\037PPC64\036PPC601\035ALTIVEC\034FPU\033MMU\031UNIFIEDCACHE" \ "\030SPE\027SPESFP\026DPESFP\025NOTB\024POWER4\023POWER5\022P5PLUS\021CELL"\ "\020BOOKE\017SMT\016ISNOOP\015ARCH205\013DFP\011ARCH206\010VSX"\ "\002TRUELE\001PPCLE" #define PPC_FEATURE2_BITMASK \ "\20" \ "\040ARCH207\037HTM\036DSCR\034ISEL\033TAR\032VCRYPTO\031HTMNOSC" \ "\030ARCH300\027IEEE128\026DARN\025SCV\024HTMNOSUSP" #define TRAPF_USERMODE(frame) (((frame)->srr1 & PSL_PR) != 0) #define TRAPF_PC(frame) ((frame)->srr0) /* * CTL_MACHDEP definitions. */ #define CPU_CACHELINE 1 static __inline u_int64_t get_cyclecount(void) { u_int32_t _upper, _lower; u_int64_t _time; __asm __volatile( "mftb %0\n" "mftbu %1" : "=r" (_lower), "=r" (_upper)); _time = (u_int64_t)_upper; _time = (_time << 32) + _lower; return (_time); } #define cpu_getstack(td) ((td)->td_frame->fixreg[1]) #define cpu_spinwait() __asm __volatile("or 27,27,27") /* yield */ #define cpu_lock_delay() DELAY(1) extern char btext[]; extern char etext[]; struct thread; #ifdef __powerpc64__ extern void enter_idle_powerx(void); extern uint64_t can_wakeup; extern register_t lpcr; #endif void cpu_halt(void); void cpu_reset(void); -void cpu_sleep(void); void flush_disable_caches(void); void fork_trampoline(void); void swi_vm(void *); int cpu_machine_check(struct thread *, struct trapframe *, int *); + +#ifndef __powerpc64__ +void mpc745x_sleep(void); +#endif + #endif /* _MACHINE_CPU_H_ */ diff --git a/sys/powerpc/powermac/platform_powermac.c b/sys/powerpc/powermac/platform_powermac.c index d08b3fe9181c..7f78bc63c1ab 100644 --- a/sys/powerpc/powermac/platform_powermac.c +++ b/sys/powerpc/powermac/platform_powermac.c @@ -1,413 +1,424 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2008 Marcel Moolenaar * Copyright (c) 2009 Nathan Whitehorn * All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include /* For save_vec() */ #include #include #include /* For save_fpu() */ #include #include #include #include #include #include #include #include "platform_if.h" extern void *ap_pcpu; static int powermac_probe(platform_t); static int powermac_attach(platform_t); void powermac_mem_regions(platform_t, struct mem_region *phys, int *physsz, struct mem_region *avail, int *availsz); static u_long powermac_timebase_freq(platform_t, struct cpuref *cpuref); static int powermac_smp_first_cpu(platform_t, struct cpuref *cpuref); static int powermac_smp_next_cpu(platform_t, struct cpuref *cpuref); static int powermac_smp_get_bsp(platform_t, struct cpuref *cpuref); static int powermac_smp_start_cpu(platform_t, struct pcpu *cpu); static void powermac_smp_timebase_sync(platform_t, u_long tb, int ap); static void powermac_reset(platform_t); +#ifndef __powerpc64__ static void powermac_sleep(platform_t); +#endif static platform_method_t powermac_methods[] = { PLATFORMMETHOD(platform_probe, powermac_probe), PLATFORMMETHOD(platform_attach, powermac_attach), PLATFORMMETHOD(platform_mem_regions, powermac_mem_regions), PLATFORMMETHOD(platform_timebase_freq, powermac_timebase_freq), PLATFORMMETHOD(platform_smp_first_cpu, powermac_smp_first_cpu), PLATFORMMETHOD(platform_smp_next_cpu, powermac_smp_next_cpu), PLATFORMMETHOD(platform_smp_get_bsp, powermac_smp_get_bsp), PLATFORMMETHOD(platform_smp_start_cpu, powermac_smp_start_cpu), PLATFORMMETHOD(platform_smp_timebase_sync, powermac_smp_timebase_sync), PLATFORMMETHOD(platform_reset, powermac_reset), +#ifndef __powerpc64__ PLATFORMMETHOD(platform_sleep, powermac_sleep), +#endif PLATFORMMETHOD_END }; static platform_def_t powermac_platform = { "powermac", powermac_methods, 0 }; PLATFORM_DEF(powermac_platform); static int powermac_probe(platform_t plat) { char compat[255]; ssize_t compatlen; char *curstr; phandle_t root; root = OF_peer(0); if (root == 0) return (ENXIO); compatlen = OF_getprop(root, "compatible", compat, sizeof(compat)); for (curstr = compat; curstr < compat + compatlen; curstr += strlen(curstr) + 1) { if (strncmp(curstr, "MacRISC", 7) == 0) return (BUS_PROBE_SPECIFIC); } return (ENXIO); } void powermac_mem_regions(platform_t plat, struct 
mem_region *phys, int *physsz, struct mem_region *avail, int *availsz) { phandle_t memory; cell_t memoryprop[PHYS_AVAIL_SZ * 2]; ssize_t propsize, i, j; int physacells = 1; memory = OF_finddevice("/memory"); if (memory == -1) memory = OF_finddevice("/memory@0"); /* "reg" has variable #address-cells, but #size-cells is always 1 */ OF_getprop(OF_parent(memory), "#address-cells", &physacells, sizeof(physacells)); propsize = OF_getprop(memory, "reg", memoryprop, sizeof(memoryprop)); propsize /= sizeof(cell_t); for (i = 0, j = 0; i < propsize; i += physacells+1, j++) { phys[j].mr_start = memoryprop[i]; if (physacells == 2) { #ifndef __powerpc64__ /* On 32-bit PPC, ignore regions starting above 4 GB */ if (memoryprop[i] != 0) { j--; continue; } #else phys[j].mr_start <<= 32; #endif phys[j].mr_start |= memoryprop[i+1]; } phys[j].mr_size = memoryprop[i + physacells]; } *physsz = j; /* "available" always has #address-cells = 1 */ propsize = OF_getprop(memory, "available", memoryprop, sizeof(memoryprop)); if (propsize <= 0) { for (i = 0; i < *physsz; i++) { avail[i].mr_start = phys[i].mr_start; avail[i].mr_size = phys[i].mr_size; } *availsz = *physsz; } else { propsize /= sizeof(cell_t); for (i = 0, j = 0; i < propsize; i += 2, j++) { avail[j].mr_start = memoryprop[i]; avail[j].mr_size = memoryprop[i + 1]; } #ifdef __powerpc64__ /* Add in regions above 4 GB to the available list */ for (i = 0; i < *physsz; i++) { if (phys[i].mr_start > BUS_SPACE_MAXADDR_32BIT) { avail[j].mr_start = phys[i].mr_start; avail[j].mr_size = phys[i].mr_size; j++; } } #endif *availsz = j; } } static int powermac_attach(platform_t plat) { phandle_t rootnode; char model[32]; /* * Quiesce Open Firmware on PowerMac11,2 and 12,1. It is * necessary there to shut down a background thread doing fan * management, and is harmful on other machines (it will make OF * shut off power to various system components it had turned on). * * Note: we don't need to worry about which OF module we are * using since this is called only from very early boot, within * OF's boot context. */ rootnode = OF_finddevice("/"); if (OF_getprop(rootnode, "model", model, sizeof(model)) > 0) { if (strcmp(model, "PowerMac11,2") == 0 || strcmp(model, "PowerMac12,1") == 0) { ofw_quiesce(); } } return (0); } static u_long powermac_timebase_freq(platform_t plat, struct cpuref *cpuref) { phandle_t phandle; int32_t ticks = -1; phandle = cpuref->cr_hwref; OF_getprop(phandle, "timebase-frequency", &ticks, sizeof(ticks)); if (ticks <= 0) panic("Unable to determine timebase frequency!"); return (ticks); } static int powermac_smp_fill_cpuref(struct cpuref *cpuref, phandle_t cpu) { cell_t cpuid; int res; cpuref->cr_hwref = cpu; res = OF_getprop(cpu, "reg", &cpuid, sizeof(cpuid)); /* * psim doesn't have a reg property, so assume 0 as for the * uniprocessor case in the CHRP spec. 
*/ if (res < 0) { cpuid = 0; } cpuref->cr_cpuid = cpuid & 0xff; return (0); } static int powermac_smp_first_cpu(platform_t plat, struct cpuref *cpuref) { char buf[8]; phandle_t cpu, dev, root; int res; root = OF_peer(0); dev = OF_child(root); while (dev != 0) { res = OF_getprop(dev, "name", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "cpus") == 0) break; dev = OF_peer(dev); } if (dev == 0) { /* * psim doesn't have a name property on the /cpus node, * but it can be found directly */ dev = OF_finddevice("/cpus"); if (dev == -1) return (ENOENT); } cpu = OF_child(dev); while (cpu != 0) { res = OF_getprop(cpu, "device_type", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "cpu") == 0) break; cpu = OF_peer(cpu); } if (cpu == 0) return (ENOENT); return (powermac_smp_fill_cpuref(cpuref, cpu)); } static int powermac_smp_next_cpu(platform_t plat, struct cpuref *cpuref) { char buf[8]; phandle_t cpu; int res; cpu = OF_peer(cpuref->cr_hwref); while (cpu != 0) { res = OF_getprop(cpu, "device_type", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "cpu") == 0) break; cpu = OF_peer(cpu); } if (cpu == 0) return (ENOENT); return (powermac_smp_fill_cpuref(cpuref, cpu)); } static int powermac_smp_get_bsp(platform_t plat, struct cpuref *cpuref) { ihandle_t inst; phandle_t bsp, chosen; int res; chosen = OF_finddevice("/chosen"); if (chosen == -1) return (ENXIO); res = OF_getprop(chosen, "cpu", &inst, sizeof(inst)); if (res < 0) return (ENXIO); bsp = OF_instance_to_package(inst); return (powermac_smp_fill_cpuref(cpuref, bsp)); } static int powermac_smp_start_cpu(platform_t plat, struct pcpu *pc) { #ifdef SMP phandle_t cpu; volatile uint8_t *rstvec; static volatile uint8_t *rstvec_virtbase = NULL; int res, reset, timeout; cpu = pc->pc_hwref; res = OF_getprop(cpu, "soft-reset", &reset, sizeof(reset)); if (res < 0) { reset = 0x58; switch (pc->pc_cpuid) { case 0: reset += 0x03; break; case 1: reset += 0x04; break; case 2: reset += 0x0f; break; case 3: reset += 0x10; break; default: return (ENXIO); } } ap_pcpu = pc; if (rstvec_virtbase == NULL) rstvec_virtbase = pmap_mapdev(0x80000000, PAGE_SIZE); rstvec = rstvec_virtbase + reset; *rstvec = 4; powerpc_sync(); (void)(*rstvec); powerpc_sync(); DELAY(1); *rstvec = 0; powerpc_sync(); (void)(*rstvec); powerpc_sync(); timeout = 10000; while (!pc->pc_awake && timeout--) DELAY(100); return ((pc->pc_awake) ? 0 : EBUSY); #else /* No SMP support */ return (ENXIO); #endif } static void powermac_smp_timebase_sync(platform_t plat, u_long tb, int ap) { mttb(tb); } static void powermac_reset(platform_t platform) { OF_reboot(); } +#ifndef __powerpc64__ void powermac_sleep(platform_t platform) { + /* Only supports MPC745x for now. */ + if (!MPC745X_P(mfspr(SPR_PVR) >> 16)) { + printf("sleep only supported for G4 PowerMac hardware.\n"); + return; + } *(unsigned long *)0x80 = 0x100; - cpu_sleep(); + mpc745x_sleep(); } +#endif
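
Note for reviewers: the dcbz probe that aim_cpu_init() uses to measure the cache line size can be reproduced standalone. The sketch below is an illustrative userland port, not part of this change: an aligned heap buffer stands in for the EXC_PGM vector and posix_memalign()/printf() replace kernel facilities. It compiles only on powerpc, and like the kernel code it assumes dcbz acts on a full cache line (i.e. after the 970 HID5 fixup in aim_early_init(), not before).

/* Userland sketch of the kernel's dcbz cache-line probe (illustrative only). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	unsigned char *buf;
	size_t linesize;

	/* 0x100 is the same upper bound the kernel probe uses. */
	if (posix_memalign((void **)&buf, 0x100, 0x100) != 0)
		return (1);
	memset(buf, 0xff, 0x100);
	/* dcbz zeroes the entire cache line containing buf. */
	__asm__ __volatile__("dcbz 0,%0" :: "r"(buf) : "memory");
	/* The first byte dcbz did not zero marks the line size. */
	for (linesize = 0; linesize < 0x100 && buf[linesize] == 0; linesize++)
		;
	printf("cache line: %zu bytes\n", linesize);
	free(buf);
	return (0);
}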
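Similarly, the PPC_FEATURE_* / PPC_FEATURE2_* words defined in the cpu.h hunk reach applications through the AT_HWCAP / AT_HWCAP2 auxiliary vector entries (and via the machdep.cpu_features sysctl). A minimal consumer sketch follows, assuming FreeBSD's elf_aux_info(3) with AT_HWCAP/AT_HWCAP2 visible via <sys/elf_common.h>; the two bit values are mirrored locally for illustration rather than pulled from machine/cpu.h.

/* Userland sketch: reading the hwcap bits exported by this header. */
#include <sys/types.h>
#include <sys/auxv.h>
#include <sys/elf_common.h>
#include <stdio.h>

#define	PPC_FEATURE_HAS_ALTIVEC	0x10000000	/* mirrors machine/cpu.h */
#define	PPC_FEATURE2_ARCH_3_00	0x00800000	/* mirrors machine/cpu.h */

int
main(void)
{
	u_long hwcap, hwcap2;

	/* elf_aux_info(3) returns 0 on success, an errno value otherwise. */
	if (elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap)) != 0 ||
	    elf_aux_info(AT_HWCAP2, &hwcap2, sizeof(hwcap2)) != 0)
		return (1);
	printf("AltiVec:  %s\n", (hwcap & PPC_FEATURE_HAS_ALTIVEC) ? "yes" : "no");
	printf("ISA 3.00: %s\n", (hwcap2 & PPC_FEATURE2_ARCH_3_00) ? "yes" : "no");
	return (0);
}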