Index: head/sys/powerpc/aim/uma_machdep.c =================================================================== --- head/sys/powerpc/aim/uma_machdep.c (revision 282263) +++ head/sys/powerpc/aim/uma_machdep.c (nonexistent) @@ -1,98 +0,0 @@ -/*- - * Copyright (c) 2003 The FreeBSD Project - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static int hw_uma_mdpages; -SYSCTL_INT(_hw, OID_AUTO, uma_mdpages, CTLFLAG_RD, &hw_uma_mdpages, 0, - "UMA MD pages in use"); - -void * -uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait) -{ - void *va; - vm_page_t m; - int pflags; - - *flags = UMA_SLAB_PRIV; - pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED; - - for (;;) { - m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ); - if (m == NULL) { - if (wait & M_NOWAIT) - return (NULL); - VM_WAIT; - } else - break; - } - - va = (void *) VM_PAGE_TO_PHYS(m); - - if (!hw_direct_map) - pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m)); - - if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) - bzero(va, PAGE_SIZE); - atomic_add_int(&hw_uma_mdpages, 1); - - return (va); -} - -void -uma_small_free(void *mem, vm_size_t size, u_int8_t flags) -{ - vm_page_t m; - - if (!hw_direct_map) - pmap_remove(kernel_pmap,(vm_offset_t)mem, - (vm_offset_t)mem + PAGE_SIZE); - - m = PHYS_TO_VM_PAGE((vm_offset_t)mem); - m->wire_count--; - vm_page_free(m); - atomic_subtract_int(&vm_cnt.v_wire_count, 1); - atomic_subtract_int(&hw_uma_mdpages, 1); -} Property changes on: head/sys/powerpc/aim/uma_machdep.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/powerpc/aim/machdep.c =================================================================== --- head/sys/powerpc/aim/machdep.c (revision 282263) +++ head/sys/powerpc/aim/machdep.c (nonexistent) @@ -1,947 +0,0 @@ -/*- - * Copyright (C) 1995, 1996 Wolfgang Solfrank. - * Copyright (C) 1995, 1996 TooLs GmbH. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by TooLs GmbH. - * 4. The name of TooLs GmbH may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -/*- - * Copyright (C) 2001 Benno Rice - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $ - */ - -#include -__FBSDID("$FreeBSD$"); - -#include "opt_compat.h" -#include "opt_ddb.h" -#include "opt_kstack_pages.h" -#include "opt_platform.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include -#ifndef __powerpc64__ -#include -#endif -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include - -int cold = 1; -#ifdef __powerpc64__ -extern int n_slbs; -int cacheline_size = 128; -#else -int cacheline_size = 32; -#endif -int hw_direct_map = 1; - -extern void *ap_pcpu; - -struct pcpu __pcpu[MAXCPU]; - -static struct trapframe frame0; - -char machine[] = "powerpc"; -SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, ""); - -static void cpu_startup(void *); -SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); - -SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size, - CTLFLAG_RD, &cacheline_size, 0, ""); - -uintptr_t powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *); - -long Maxmem = 0; -long realmem = 0; - -#ifndef __powerpc64__ -struct bat battable[16]; -#endif - -struct kva_md_info kmi; - -static void -cpu_startup(void *dummy) -{ - - /* - * Initialise the decrementer-based clock. - */ - decr_init(); - - /* - * Good {morning,afternoon,evening,night}. - */ - cpu_setup(PCPU_GET(cpuid)); - -#ifdef PERFMON - perfmon_init(); -#endif - printf("real memory = %ld (%ld MB)\n", ptoa(physmem), - ptoa(physmem) / 1048576); - realmem = physmem; - - if (bootverbose) - printf("available KVA = %zd (%zd MB)\n", - virtual_end - virtual_avail, - (virtual_end - virtual_avail) / 1048576); - - /* - * Display any holes after the first chunk of extended memory. - */ - if (bootverbose) { - int indx; - - printf("Physical memory chunk(s):\n"); - for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { - vm_offset_t size1 = - phys_avail[indx + 1] - phys_avail[indx]; - - #ifdef __powerpc64__ - printf("0x%016lx - 0x%016lx, %ld bytes (%ld pages)\n", - #else - printf("0x%08x - 0x%08x, %d bytes (%ld pages)\n", - #endif - phys_avail[indx], phys_avail[indx + 1] - 1, size1, - size1 / PAGE_SIZE); - } - } - - vm_ksubmap_init(&kmi); - - printf("avail memory = %ld (%ld MB)\n", ptoa(vm_cnt.v_free_count), - ptoa(vm_cnt.v_free_count) / 1048576); - - /* - * Set up buffers, so they can be used to read disk labels. - */ - bufinit(); - vm_pager_bufferinit(); -} - -extern vm_offset_t __startkernel, __endkernel; -extern unsigned char __bss_start[]; -extern unsigned char __sbss_start[]; -extern unsigned char __sbss_end[]; -extern unsigned char _end[]; - -#ifndef __powerpc64__ -/* Bits for running on 64-bit systems in 32-bit mode. 
*/ -extern void *testppc64, *testppc64size; -extern void *restorebridge, *restorebridgesize; -extern void *rfid_patch, *rfi_patch1, *rfi_patch2; -extern void *trapcode64; - -extern Elf_Addr _GLOBAL_OFFSET_TABLE_[]; -#endif - -extern void *rstcode, *rstcodeend; -extern void *trapcode, *trapcodeend; -extern void *generictrap, *generictrap64; -extern void *slbtrap, *slbtrapend; -extern void *alitrap, *aliend; -extern void *dsitrap, *dsiend; -extern void *decrint, *decrsize; -extern void *extint, *extsize; -extern void *dblow, *dbend; -extern void *imisstrap, *imisssize; -extern void *dlmisstrap, *dlmisssize; -extern void *dsmisstrap, *dsmisssize; - -uintptr_t -powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp) -{ - struct pcpu *pc; - vm_offset_t startkernel, endkernel; - size_t trap_offset, trapsize; - vm_offset_t trap; - void *kmdp; - char *env; - register_t msr, scratch; - uint8_t *cache_check; - int cacheline_warn; - #ifndef __powerpc64__ - int ppc64; - #endif -#ifdef DDB - vm_offset_t ksym_start; - vm_offset_t ksym_end; -#endif - - kmdp = NULL; - trap_offset = 0; - cacheline_warn = 0; - - /* First guess at start/end kernel positions */ - startkernel = __startkernel; - endkernel = __endkernel; - - /* Check for ePAPR loader, which puts a magic value into r6 */ - if (mdp == (void *)0x65504150) - mdp = NULL; - - /* - * Parse metadata if present and fetch parameters. Must be done - * before console is inited so cninit gets the right value of - * boothowto. - */ - if (mdp != NULL) { - preload_metadata = mdp; - kmdp = preload_search_by_type("elf kernel"); - if (kmdp != NULL) { - boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); - kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *); - endkernel = ulmax(endkernel, MD_FETCH(kmdp, - MODINFOMD_KERNEND, vm_offset_t)); -#ifdef DDB - ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t); - ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t); - db_fetch_ksymtab(ksym_start, ksym_end); -#endif - } - } else { - bzero(__sbss_start, __sbss_end - __sbss_start); - bzero(__bss_start, _end - __bss_start); - } - - /* Store boot environment state */ - OF_initial_setup((void *)fdt, NULL, (int (*)(void *))ofentry); - - /* - * Init params/tunables that can be overridden by the loader - */ - init_param1(); - - /* - * Start initializing proc0 and thread0. - */ - proc_linkup0(&proc0, &thread0); - thread0.td_frame = &frame0; - - /* - * Set up per-cpu data. - */ - pc = __pcpu; - pcpu_init(pc, 0, sizeof(struct pcpu)); - pc->pc_curthread = &thread0; -#ifdef __powerpc64__ - __asm __volatile("mr 13,%0" :: "r"(pc->pc_curthread)); -#else - __asm __volatile("mr 2,%0" :: "r"(pc->pc_curthread)); -#endif - pc->pc_cpuid = 0; - - __asm __volatile("mtsprg 0, %0" :: "r"(pc)); - - /* - * Init mutexes, which we use heavily in PMAP - */ - - mutex_init(); - - /* - * Install the OF client interface - */ - - OF_bootstrap(); - - /* - * Initialize the console before printing anything. - */ - cninit(); - - /* - * Complain if there is no metadata. - */ - if (mdp == NULL || kmdp == NULL) { - printf("powerpc_init: no loader metadata.\n"); - } - - /* - * Init KDB - */ - - kdb_init(); - - /* Various very early CPU fix ups */ - switch (mfpvr() >> 16) { - /* - * PowerPC 970 CPUs have a misfeature requested by Apple that - * makes them pretend they have a 32-byte cacheline. Turn this - * off before we measure the cacheline size. 
- */ - case IBM970: - case IBM970FX: - case IBM970MP: - case IBM970GX: - scratch = mfspr(SPR_HID5); - scratch &= ~HID5_970_DCBZ_SIZE_HI; - mtspr(SPR_HID5, scratch); - break; - #ifdef __powerpc64__ - case IBMPOWER7: - case IBMPOWER7PLUS: - case IBMPOWER8: - case IBMPOWER8E: - /* XXX: get from ibm,slb-size in device tree */ - n_slbs = 32; - break; - #endif - } - - /* - * Initialize the interrupt tables and figure out our cache line - * size and whether or not we need the 64-bit bridge code. - */ - - /* - * Disable translation in case the vector area hasn't been - * mapped (G5). Note that no OFW calls can be made until - * translation is re-enabled. - */ - - msr = mfmsr(); - mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI); - - /* - * Measure the cacheline size using dcbz - * - * Use EXC_PGM as a playground. We are about to overwrite it - * anyway, we know it exists, and we know it is cache-aligned. - */ - - cache_check = (void *)EXC_PGM; - - for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++) - cache_check[cacheline_size] = 0xff; - - __asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory"); - - /* Find the first byte dcbz did not zero to get the cache line size */ - for (cacheline_size = 0; cacheline_size < 0x100 && - cache_check[cacheline_size] == 0; cacheline_size++); - - /* Work around psim bug */ - if (cacheline_size == 0) { - cacheline_warn = 1; - cacheline_size = 32; - } - - /* Make sure the kernel icache is valid before we go too much further */ - __syncicache((caddr_t)startkernel, endkernel - startkernel); - - #ifndef __powerpc64__ - /* - * Figure out whether we need to use the 64 bit PMAP. This works by - * executing an instruction that is only legal on 64-bit PPC (mtmsrd), - * and setting ppc64 = 0 if that causes a trap. - */ - - ppc64 = 1; - - bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size); - __syncicache((void *)EXC_PGM, (size_t)&testppc64size); - - __asm __volatile("\ - mfmsr %0; \ - mtsprg2 %1; \ - \ - mtmsrd %0; \ - mfsprg2 %1;" - : "=r"(scratch), "=r"(ppc64)); - - if (ppc64) - cpu_features |= PPC_FEATURE_64; - - /* - * Now copy restorebridge into all the handlers, if necessary, - * and set up the trap tables. - */ - - if (cpu_features & PPC_FEATURE_64) { - /* Patch the two instances of rfi -> rfid */ - bcopy(&rfid_patch,&rfi_patch1,4); - #ifdef KDB - /* rfi_patch2 is at the end of dbleave */ - bcopy(&rfid_patch,&rfi_patch2,4); - #endif - } - #else /* powerpc64 */ - cpu_features |= PPC_FEATURE_64; - #endif - - trapsize = (size_t)&trapcodeend - (size_t)&trapcode; - - /* - * Copy generic handler into every possible trap. Special cases will get - * different ones in a minute. 
- */ - for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20) - bcopy(&trapcode, (void *)trap, trapsize); - - #ifndef __powerpc64__ - if (cpu_features & PPC_FEATURE_64) { - /* - * Copy a code snippet to restore 32-bit bridge mode - * to the top of every non-generic trap handler - */ - - trap_offset += (size_t)&restorebridgesize; - bcopy(&restorebridge, (void *)EXC_RST, trap_offset); - bcopy(&restorebridge, (void *)EXC_DSI, trap_offset); - bcopy(&restorebridge, (void *)EXC_ALI, trap_offset); - bcopy(&restorebridge, (void *)EXC_PGM, trap_offset); - bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset); - bcopy(&restorebridge, (void *)EXC_TRC, trap_offset); - bcopy(&restorebridge, (void *)EXC_BPT, trap_offset); - } - #endif - - bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend - - (size_t)&rstcode); - -#ifdef KDB - bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend - - (size_t)&dblow); - bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend - - (size_t)&dblow); - bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend - - (size_t)&dblow); - bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend - - (size_t)&dblow); -#endif - bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&aliend - - (size_t)&alitrap); - bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsiend - - (size_t)&dsitrap); - - #ifdef __powerpc64__ - /* Set TOC base so that the interrupt code can get at it */ - *((void **)TRAP_GENTRAP) = &generictrap; - *((register_t *)TRAP_TOCBASE) = toc; - - bcopy(&slbtrap, (void *)EXC_DSE,(size_t)&slbtrapend - (size_t)&slbtrap); - bcopy(&slbtrap, (void *)EXC_ISE,(size_t)&slbtrapend - (size_t)&slbtrap); - #else - /* Set branch address for trap code */ - if (cpu_features & PPC_FEATURE_64) - *((void **)TRAP_GENTRAP) = &generictrap64; - else - *((void **)TRAP_GENTRAP) = &generictrap; - *((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_; - - /* G2-specific TLB miss helper handlers */ - bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize); - bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize); - bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize); - #endif - __syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD); - - /* - * Restore MSR - */ - mtmsr(msr); - - /* Warn if cachline size was not determined */ - if (cacheline_warn == 1) { - printf("WARNING: cacheline size undetermined, setting to 32\n"); - } - - /* - * Choose a platform module so we can get the physical memory map. - */ - - platform_probe_and_attach(); - - /* - * Initialise virtual memory. Use BUS_PROBE_GENERIC priority - * in case the platform module had a better idea of what we - * should do. - */ - if (cpu_features & PPC_FEATURE_64) - pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC); - else - pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC); - - pmap_bootstrap(startkernel, endkernel); - mtmsr(PSL_KERNSET & ~PSL_EE); - - /* - * Initialize params/tunables that are derived from memsize - */ - init_param2(physmem); - - /* - * Grab booted kernel's name - */ - env = kern_getenv("kernelname"); - if (env != NULL) { - strlcpy(kernelname, env, sizeof(kernelname)); - freeenv(env); - } - - /* - * Finish setting up thread0. - */ - thread0.td_pcb = (struct pcb *) - ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE - - sizeof(struct pcb)) & ~15UL); - bzero((void *)thread0.td_pcb, sizeof(struct pcb)); - pc->pc_curpcb = thread0.td_pcb; - - /* Initialise the message buffer. 
*/ - msgbufinit(msgbufp, msgbufsize); - -#ifdef KDB - if (boothowto & RB_KDB) - kdb_enter(KDB_WHY_BOOTFLAGS, - "Boot flags requested debugger"); -#endif - - return (((uintptr_t)thread0.td_pcb - - (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL); -} - -void -bzero(void *buf, size_t len) -{ - caddr_t p; - - p = buf; - - while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) { - *p++ = 0; - len--; - } - - while (len >= sizeof(u_long) * 8) { - *(u_long*) p = 0; - *((u_long*) p + 1) = 0; - *((u_long*) p + 2) = 0; - *((u_long*) p + 3) = 0; - len -= sizeof(u_long) * 8; - *((u_long*) p + 4) = 0; - *((u_long*) p + 5) = 0; - *((u_long*) p + 6) = 0; - *((u_long*) p + 7) = 0; - p += sizeof(u_long) * 8; - } - - while (len >= sizeof(u_long)) { - *(u_long*) p = 0; - len -= sizeof(u_long); - p += sizeof(u_long); - } - - while (len) { - *p++ = 0; - len--; - } -} - -void -cpu_boot(int howto) -{ -} - -/* - * Flush the D-cache for non-DMA I/O so that the I-cache can - * be made coherent later. - */ -void -cpu_flush_dcache(void *ptr, size_t len) -{ - /* TBD */ -} - -/* - * Shutdown the CPU as much as possible. - */ -void -cpu_halt(void) -{ - - OF_exit(); -} - -int -ptrace_set_pc(struct thread *td, unsigned long addr) -{ - struct trapframe *tf; - - tf = td->td_frame; - tf->srr0 = (register_t)addr; - - return (0); -} - -int -ptrace_single_step(struct thread *td) -{ - struct trapframe *tf; - - tf = td->td_frame; - tf->srr1 |= PSL_SE; - - return (0); -} - -int -ptrace_clear_single_step(struct thread *td) -{ - struct trapframe *tf; - - tf = td->td_frame; - tf->srr1 &= ~PSL_SE; - - return (0); -} - -void -kdb_cpu_clear_singlestep(void) -{ - - kdb_frame->srr1 &= ~PSL_SE; -} - -void -kdb_cpu_set_singlestep(void) -{ - - kdb_frame->srr1 |= PSL_SE; -} - -/* - * Initialise a struct pcpu. 
- */ -void -cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz) -{ -#ifdef __powerpc64__ -/* Copy the SLB contents from the current CPU */ -memcpy(pcpu->pc_slb, PCPU_GET(slb), sizeof(pcpu->pc_slb)); -#endif -} - -void -spinlock_enter(void) -{ - struct thread *td; - register_t msr; - - td = curthread; - if (td->td_md.md_spinlock_count == 0) { - __asm __volatile("or 2,2,2"); /* Set high thread priority */ - msr = intr_disable(); - td->td_md.md_spinlock_count = 1; - td->td_md.md_saved_msr = msr; - } else - td->td_md.md_spinlock_count++; - critical_enter(); -} - -void -spinlock_exit(void) -{ - struct thread *td; - register_t msr; - - td = curthread; - critical_exit(); - msr = td->td_md.md_saved_msr; - td->td_md.md_spinlock_count--; - if (td->td_md.md_spinlock_count == 0) { - intr_restore(msr); - __asm __volatile("or 6,6,6"); /* Set normal thread priority */ - } -} - -#ifndef __powerpc64__ - -uint64_t -va_to_vsid(pmap_t pm, vm_offset_t va) -{ - return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK); -} - -#endif - -vm_offset_t -pmap_early_io_map(vm_paddr_t pa, vm_size_t size) -{ - - return (pa); -} - -/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */ -void -flush_disable_caches(void) -{ - register_t msr; - register_t msscr0; - register_t cache_reg; - volatile uint32_t *memp; - uint32_t temp; - int i; - int x; - - msr = mfmsr(); - powerpc_sync(); - mtmsr(msr & ~(PSL_EE | PSL_DR)); - msscr0 = mfspr(SPR_MSSCR0); - msscr0 &= ~MSSCR0_L2PFE; - mtspr(SPR_MSSCR0, msscr0); - powerpc_sync(); - isync(); - __asm__ __volatile__("dssall; sync"); - powerpc_sync(); - isync(); - __asm__ __volatile__("dcbf 0,%0" :: "r"(0)); - __asm__ __volatile__("dcbf 0,%0" :: "r"(0)); - __asm__ __volatile__("dcbf 0,%0" :: "r"(0)); - - /* Lock the L1 Data cache. */ - mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF); - powerpc_sync(); - isync(); - - mtspr(SPR_LDSTCR, 0); - - /* - * Perform this in two stages: Flush the cache starting in RAM, then do it - * from ROM. 
- */ - memp = (volatile uint32_t *)0x00000000; - for (i = 0; i < 128 * 1024; i++) { - temp = *memp; - __asm__ __volatile__("dcbf 0,%0" :: "r"(memp)); - memp += 32/sizeof(*memp); - } - - memp = (volatile uint32_t *)0xfff00000; - x = 0xfe; - - for (; x != 0xff;) { - mtspr(SPR_LDSTCR, x); - for (i = 0; i < 128; i++) { - temp = *memp; - __asm__ __volatile__("dcbf 0,%0" :: "r"(memp)); - memp += 32/sizeof(*memp); - } - x = ((x << 1) | 1) & 0xff; - } - mtspr(SPR_LDSTCR, 0); - - cache_reg = mfspr(SPR_L2CR); - if (cache_reg & L2CR_L2E) { - cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450); - mtspr(SPR_L2CR, cache_reg); - powerpc_sync(); - mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF); - while (mfspr(SPR_L2CR) & L2CR_L2HWF) - ; /* Busy wait for cache to flush */ - powerpc_sync(); - cache_reg &= ~L2CR_L2E; - mtspr(SPR_L2CR, cache_reg); - powerpc_sync(); - mtspr(SPR_L2CR, cache_reg | L2CR_L2I); - powerpc_sync(); - while (mfspr(SPR_L2CR) & L2CR_L2I) - ; /* Busy wait for L2 cache invalidate */ - powerpc_sync(); - } - - cache_reg = mfspr(SPR_L3CR); - if (cache_reg & L3CR_L3E) { - cache_reg &= ~(L3CR_L3IO | L3CR_L3DO); - mtspr(SPR_L3CR, cache_reg); - powerpc_sync(); - mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF); - while (mfspr(SPR_L3CR) & L3CR_L3HWF) - ; /* Busy wait for cache to flush */ - powerpc_sync(); - cache_reg &= ~L3CR_L3E; - mtspr(SPR_L3CR, cache_reg); - powerpc_sync(); - mtspr(SPR_L3CR, cache_reg | L3CR_L3I); - powerpc_sync(); - while (mfspr(SPR_L3CR) & L3CR_L3I) - ; /* Busy wait for L3 cache invalidate */ - powerpc_sync(); - } - - mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE); - powerpc_sync(); - isync(); - - mtmsr(msr); -} - -void -cpu_sleep() -{ - static u_quad_t timebase = 0; - static register_t sprgs[4]; - static register_t srrs[2]; - - jmp_buf resetjb; - struct thread *fputd; - struct thread *vectd; - register_t hid0; - register_t msr; - register_t saved_msr; - - ap_pcpu = pcpup; - - PCPU_SET(restore, &resetjb); - - saved_msr = mfmsr(); - fputd = PCPU_GET(fputhread); - vectd = PCPU_GET(vecthread); - if (fputd != NULL) - save_fpu(fputd); - if (vectd != NULL) - save_vec(vectd); - if (setjmp(resetjb) == 0) { - sprgs[0] = mfspr(SPR_SPRG0); - sprgs[1] = mfspr(SPR_SPRG1); - sprgs[2] = mfspr(SPR_SPRG2); - sprgs[3] = mfspr(SPR_SPRG3); - srrs[0] = mfspr(SPR_SRR0); - srrs[1] = mfspr(SPR_SRR1); - timebase = mftb(); - powerpc_sync(); - flush_disable_caches(); - hid0 = mfspr(SPR_HID0); - hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP; - powerpc_sync(); - isync(); - msr = mfmsr() | PSL_POW; - mtspr(SPR_HID0, hid0); - powerpc_sync(); - - while (1) - mtmsr(msr); - } - mttb(timebase); - PCPU_SET(curthread, curthread); - PCPU_SET(curpcb, curthread->td_pcb); - pmap_activate(curthread); - powerpc_sync(); - mtspr(SPR_SPRG0, sprgs[0]); - mtspr(SPR_SPRG1, sprgs[1]); - mtspr(SPR_SPRG2, sprgs[2]); - mtspr(SPR_SPRG3, sprgs[3]); - mtspr(SPR_SRR0, srrs[0]); - mtspr(SPR_SRR1, srrs[1]); - mtmsr(saved_msr); - if (fputd == curthread) - enable_fpu(curthread); - if (vectd == curthread) - enable_vec(curthread); - powerpc_sync(); -} Property changes on: head/sys/powerpc/aim/machdep.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/powerpc/aim/aim_machdep.c =================================================================== --- head/sys/powerpc/aim/aim_machdep.c (nonexistent) +++ head/sys/powerpc/aim/aim_machdep.c (revision 282264) @@ -0,0 +1,616 @@ +/*- + * Copyright (C) 1995, 1996 Wolfgang Solfrank. 
+ * Copyright (C) 1995, 1996 TooLs GmbH. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by TooLs GmbH. + * 4. The name of TooLs GmbH may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/*- + * Copyright (C) 2001 Benno Rice + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $ + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_compat.h" +#include "opt_ddb.h" +#include "opt_kstack_pages.h" +#include "opt_platform.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#ifndef __powerpc64__ +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#ifdef __powerpc64__ +extern int n_slbs; +#endif + +#ifndef __powerpc64__ +struct bat battable[16]; +#endif + +#ifndef __powerpc64__ +/* Bits for running on 64-bit systems in 32-bit mode. */ +extern void *testppc64, *testppc64size; +extern void *restorebridge, *restorebridgesize; +extern void *rfid_patch, *rfi_patch1, *rfi_patch2; +extern void *trapcode64; + +extern Elf_Addr _GLOBAL_OFFSET_TABLE_[]; +#endif + +extern void *rstcode, *rstcodeend; +extern void *trapcode, *trapcodeend; +extern void *generictrap, *generictrap64; +extern void *slbtrap, *slbtrapend; +extern void *alitrap, *aliend; +extern void *dsitrap, *dsiend; +extern void *decrint, *decrsize; +extern void *extint, *extsize; +extern void *dblow, *dbend; +extern void *imisstrap, *imisssize; +extern void *dlmisstrap, *dlmisssize; +extern void *dsmisstrap, *dsmisssize; + +extern void *ap_pcpu; + +void aim_cpu_init(vm_offset_t toc); + +void +aim_cpu_init(vm_offset_t toc) +{ + size_t trap_offset, trapsize; + vm_offset_t trap; + register_t msr, scratch; + uint8_t *cache_check; + int cacheline_warn; + #ifndef __powerpc64__ + int ppc64; + #endif + + trap_offset = 0; + cacheline_warn = 0; + + /* Various very early CPU fix ups */ + switch (mfpvr() >> 16) { + /* + * PowerPC 970 CPUs have a misfeature requested by Apple that + * makes them pretend they have a 32-byte cacheline. Turn this + * off before we measure the cacheline size. + */ + case IBM970: + case IBM970FX: + case IBM970MP: + case IBM970GX: + scratch = mfspr(SPR_HID5); + scratch &= ~HID5_970_DCBZ_SIZE_HI; + mtspr(SPR_HID5, scratch); + break; + #ifdef __powerpc64__ + case IBMPOWER7: + case IBMPOWER7PLUS: + case IBMPOWER8: + case IBMPOWER8E: + /* XXX: get from ibm,slb-size in device tree */ + n_slbs = 32; + break; + #endif + } + + /* + * Initialize the interrupt tables and figure out our cache line + * size and whether or not we need the 64-bit bridge code. + */ + + /* + * Disable translation in case the vector area hasn't been + * mapped (G5). Note that no OFW calls can be made until + * translation is re-enabled. + */ + + msr = mfmsr(); + mtmsr((msr & ~(PSL_IR | PSL_DR)) | PSL_RI); + + /* + * Measure the cacheline size using dcbz + * + * Use EXC_PGM as a playground. We are about to overwrite it + * anyway, we know it exists, and we know it is cache-aligned. 
+ */ + + cache_check = (void *)EXC_PGM; + + for (cacheline_size = 0; cacheline_size < 0x100; cacheline_size++) + cache_check[cacheline_size] = 0xff; + + __asm __volatile("dcbz 0,%0":: "r" (cache_check) : "memory"); + + /* Find the first byte dcbz did not zero to get the cache line size */ + for (cacheline_size = 0; cacheline_size < 0x100 && + cache_check[cacheline_size] == 0; cacheline_size++); + + /* Work around psim bug */ + if (cacheline_size == 0) { + cacheline_warn = 1; + cacheline_size = 32; + } + + #ifndef __powerpc64__ + /* + * Figure out whether we need to use the 64 bit PMAP. This works by + * executing an instruction that is only legal on 64-bit PPC (mtmsrd), + * and setting ppc64 = 0 if that causes a trap. + */ + + ppc64 = 1; + + bcopy(&testppc64, (void *)EXC_PGM, (size_t)&testppc64size); + __syncicache((void *)EXC_PGM, (size_t)&testppc64size); + + __asm __volatile("\ + mfmsr %0; \ + mtsprg2 %1; \ + \ + mtmsrd %0; \ + mfsprg2 %1;" + : "=r"(scratch), "=r"(ppc64)); + + if (ppc64) + cpu_features |= PPC_FEATURE_64; + + /* + * Now copy restorebridge into all the handlers, if necessary, + * and set up the trap tables. + */ + + if (cpu_features & PPC_FEATURE_64) { + /* Patch the two instances of rfi -> rfid */ + bcopy(&rfid_patch,&rfi_patch1,4); + #ifdef KDB + /* rfi_patch2 is at the end of dbleave */ + bcopy(&rfid_patch,&rfi_patch2,4); + #endif + } + #else /* powerpc64 */ + cpu_features |= PPC_FEATURE_64; + #endif + + trapsize = (size_t)&trapcodeend - (size_t)&trapcode; + + /* + * Copy generic handler into every possible trap. Special cases will get + * different ones in a minute. + */ + for (trap = EXC_RST; trap < EXC_LAST; trap += 0x20) + bcopy(&trapcode, (void *)trap, trapsize); + + #ifndef __powerpc64__ + if (cpu_features & PPC_FEATURE_64) { + /* + * Copy a code snippet to restore 32-bit bridge mode + * to the top of every non-generic trap handler + */ + + trap_offset += (size_t)&restorebridgesize; + bcopy(&restorebridge, (void *)EXC_RST, trap_offset); + bcopy(&restorebridge, (void *)EXC_DSI, trap_offset); + bcopy(&restorebridge, (void *)EXC_ALI, trap_offset); + bcopy(&restorebridge, (void *)EXC_PGM, trap_offset); + bcopy(&restorebridge, (void *)EXC_MCHK, trap_offset); + bcopy(&restorebridge, (void *)EXC_TRC, trap_offset); + bcopy(&restorebridge, (void *)EXC_BPT, trap_offset); + } + #endif + + bcopy(&rstcode, (void *)(EXC_RST + trap_offset), (size_t)&rstcodeend - + (size_t)&rstcode); + +#ifdef KDB + bcopy(&dblow, (void *)(EXC_MCHK + trap_offset), (size_t)&dbend - + (size_t)&dblow); + bcopy(&dblow, (void *)(EXC_PGM + trap_offset), (size_t)&dbend - + (size_t)&dblow); + bcopy(&dblow, (void *)(EXC_TRC + trap_offset), (size_t)&dbend - + (size_t)&dblow); + bcopy(&dblow, (void *)(EXC_BPT + trap_offset), (size_t)&dbend - + (size_t)&dblow); +#endif + bcopy(&alitrap, (void *)(EXC_ALI + trap_offset), (size_t)&aliend - + (size_t)&alitrap); + bcopy(&dsitrap, (void *)(EXC_DSI + trap_offset), (size_t)&dsiend - + (size_t)&dsitrap); + + #ifdef __powerpc64__ + /* Set TOC base so that the interrupt code can get at it */ + *((void **)TRAP_GENTRAP) = &generictrap; + *((register_t *)TRAP_TOCBASE) = toc; + + bcopy(&slbtrap, (void *)EXC_DSE,(size_t)&slbtrapend - (size_t)&slbtrap); + bcopy(&slbtrap, (void *)EXC_ISE,(size_t)&slbtrapend - (size_t)&slbtrap); + #else + /* Set branch address for trap code */ + if (cpu_features & PPC_FEATURE_64) + *((void **)TRAP_GENTRAP) = &generictrap64; + else + *((void **)TRAP_GENTRAP) = &generictrap; + *((void **)TRAP_TOCBASE) = _GLOBAL_OFFSET_TABLE_; + + /* G2-specific 
TLB miss helper handlers */ + bcopy(&imisstrap, (void *)EXC_IMISS, (size_t)&imisssize); + bcopy(&dlmisstrap, (void *)EXC_DLMISS, (size_t)&dlmisssize); + bcopy(&dsmisstrap, (void *)EXC_DSMISS, (size_t)&dsmisssize); + #endif + __syncicache(EXC_RSVD, EXC_LAST - EXC_RSVD); + + /* + * Restore MSR + */ + mtmsr(msr); + + /* Warn if cachline size was not determined */ + if (cacheline_warn == 1) { + printf("WARNING: cacheline size undetermined, setting to 32\n"); + } + + /* + * Initialise virtual memory. Use BUS_PROBE_GENERIC priority + * in case the platform module had a better idea of what we + * should do. + */ + if (cpu_features & PPC_FEATURE_64) + pmap_mmu_install(MMU_TYPE_G5, BUS_PROBE_GENERIC); + else + pmap_mmu_install(MMU_TYPE_OEA, BUS_PROBE_GENERIC); +} + +/* + * Shutdown the CPU as much as possible. + */ +void +cpu_halt(void) +{ + + OF_exit(); +} + +int +ptrace_single_step(struct thread *td) +{ + struct trapframe *tf; + + tf = td->td_frame; + tf->srr1 |= PSL_SE; + + return (0); +} + +int +ptrace_clear_single_step(struct thread *td) +{ + struct trapframe *tf; + + tf = td->td_frame; + tf->srr1 &= ~PSL_SE; + + return (0); +} + +void +kdb_cpu_clear_singlestep(void) +{ + + kdb_frame->srr1 &= ~PSL_SE; +} + +void +kdb_cpu_set_singlestep(void) +{ + + kdb_frame->srr1 |= PSL_SE; +} + +/* + * Initialise a struct pcpu. + */ +void +cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz) +{ +#ifdef __powerpc64__ +/* Copy the SLB contents from the current CPU */ +memcpy(pcpu->pc_slb, PCPU_GET(slb), sizeof(pcpu->pc_slb)); +#endif +} + +#ifndef __powerpc64__ +uint64_t +va_to_vsid(pmap_t pm, vm_offset_t va) +{ + return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK); +} + +#endif + +vm_offset_t +pmap_early_io_map(vm_paddr_t pa, vm_size_t size) +{ + + return (pa); +} + +/* From p3-53 of the MPC7450 RISC Microprocessor Family Reference Manual */ +void +flush_disable_caches(void) +{ + register_t msr; + register_t msscr0; + register_t cache_reg; + volatile uint32_t *memp; + uint32_t temp; + int i; + int x; + + msr = mfmsr(); + powerpc_sync(); + mtmsr(msr & ~(PSL_EE | PSL_DR)); + msscr0 = mfspr(SPR_MSSCR0); + msscr0 &= ~MSSCR0_L2PFE; + mtspr(SPR_MSSCR0, msscr0); + powerpc_sync(); + isync(); + __asm__ __volatile__("dssall; sync"); + powerpc_sync(); + isync(); + __asm__ __volatile__("dcbf 0,%0" :: "r"(0)); + __asm__ __volatile__("dcbf 0,%0" :: "r"(0)); + __asm__ __volatile__("dcbf 0,%0" :: "r"(0)); + + /* Lock the L1 Data cache. */ + mtspr(SPR_LDSTCR, mfspr(SPR_LDSTCR) | 0xFF); + powerpc_sync(); + isync(); + + mtspr(SPR_LDSTCR, 0); + + /* + * Perform this in two stages: Flush the cache starting in RAM, then do it + * from ROM. 
+ */ + memp = (volatile uint32_t *)0x00000000; + for (i = 0; i < 128 * 1024; i++) { + temp = *memp; + __asm__ __volatile__("dcbf 0,%0" :: "r"(memp)); + memp += 32/sizeof(*memp); + } + + memp = (volatile uint32_t *)0xfff00000; + x = 0xfe; + + for (; x != 0xff;) { + mtspr(SPR_LDSTCR, x); + for (i = 0; i < 128; i++) { + temp = *memp; + __asm__ __volatile__("dcbf 0,%0" :: "r"(memp)); + memp += 32/sizeof(*memp); + } + x = ((x << 1) | 1) & 0xff; + } + mtspr(SPR_LDSTCR, 0); + + cache_reg = mfspr(SPR_L2CR); + if (cache_reg & L2CR_L2E) { + cache_reg &= ~(L2CR_L2IO_7450 | L2CR_L2DO_7450); + mtspr(SPR_L2CR, cache_reg); + powerpc_sync(); + mtspr(SPR_L2CR, cache_reg | L2CR_L2HWF); + while (mfspr(SPR_L2CR) & L2CR_L2HWF) + ; /* Busy wait for cache to flush */ + powerpc_sync(); + cache_reg &= ~L2CR_L2E; + mtspr(SPR_L2CR, cache_reg); + powerpc_sync(); + mtspr(SPR_L2CR, cache_reg | L2CR_L2I); + powerpc_sync(); + while (mfspr(SPR_L2CR) & L2CR_L2I) + ; /* Busy wait for L2 cache invalidate */ + powerpc_sync(); + } + + cache_reg = mfspr(SPR_L3CR); + if (cache_reg & L3CR_L3E) { + cache_reg &= ~(L3CR_L3IO | L3CR_L3DO); + mtspr(SPR_L3CR, cache_reg); + powerpc_sync(); + mtspr(SPR_L3CR, cache_reg | L3CR_L3HWF); + while (mfspr(SPR_L3CR) & L3CR_L3HWF) + ; /* Busy wait for cache to flush */ + powerpc_sync(); + cache_reg &= ~L3CR_L3E; + mtspr(SPR_L3CR, cache_reg); + powerpc_sync(); + mtspr(SPR_L3CR, cache_reg | L3CR_L3I); + powerpc_sync(); + while (mfspr(SPR_L3CR) & L3CR_L3I) + ; /* Busy wait for L3 cache invalidate */ + powerpc_sync(); + } + + mtspr(SPR_HID0, mfspr(SPR_HID0) & ~HID0_DCE); + powerpc_sync(); + isync(); + + mtmsr(msr); +} + +void +cpu_sleep() +{ + static u_quad_t timebase = 0; + static register_t sprgs[4]; + static register_t srrs[2]; + + jmp_buf resetjb; + struct thread *fputd; + struct thread *vectd; + register_t hid0; + register_t msr; + register_t saved_msr; + + ap_pcpu = pcpup; + + PCPU_SET(restore, &resetjb); + + saved_msr = mfmsr(); + fputd = PCPU_GET(fputhread); + vectd = PCPU_GET(vecthread); + if (fputd != NULL) + save_fpu(fputd); + if (vectd != NULL) + save_vec(vectd); + if (setjmp(resetjb) == 0) { + sprgs[0] = mfspr(SPR_SPRG0); + sprgs[1] = mfspr(SPR_SPRG1); + sprgs[2] = mfspr(SPR_SPRG2); + sprgs[3] = mfspr(SPR_SPRG3); + srrs[0] = mfspr(SPR_SRR0); + srrs[1] = mfspr(SPR_SRR1); + timebase = mftb(); + powerpc_sync(); + flush_disable_caches(); + hid0 = mfspr(SPR_HID0); + hid0 = (hid0 & ~(HID0_DOZE | HID0_NAP)) | HID0_SLEEP; + powerpc_sync(); + isync(); + msr = mfmsr() | PSL_POW; + mtspr(SPR_HID0, hid0); + powerpc_sync(); + + while (1) + mtmsr(msr); + } + mttb(timebase); + PCPU_SET(curthread, curthread); + PCPU_SET(curpcb, curthread->td_pcb); + pmap_activate(curthread); + powerpc_sync(); + mtspr(SPR_SPRG0, sprgs[0]); + mtspr(SPR_SPRG1, sprgs[1]); + mtspr(SPR_SPRG2, sprgs[2]); + mtspr(SPR_SPRG3, sprgs[3]); + mtspr(SPR_SRR0, srrs[0]); + mtspr(SPR_SRR1, srrs[1]); + mtmsr(saved_msr); + if (fputd == curthread) + enable_fpu(curthread); + if (vectd == curthread) + enable_vec(curthread); + powerpc_sync(); +} + Property changes on: head/sys/powerpc/aim/aim_machdep.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/powerpc/aim/mmu_oea64.c =================================================================== --- head/sys/powerpc/aim/mmu_oea64.c (revision 282263) +++ head/sys/powerpc/aim/mmu_oea64.c (revision 282264) @@ -1,2657 +1,2654 @@ /*- * Copyright (c) 2008-2015 Nathan Whitehorn * All rights 
reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Manages physical address maps. * * Since the information managed by this module is also stored by the * logical address mapping module, this module may throw away valid virtual * to physical mappings at almost any time. However, invalidations of * mappings must be done as requested. * * In order to cope with hardware architectures which make virtual to * physical map invalidates expensive, this module may delay invalidate * reduced protection operations until such time as they are actually * necessary. This module is given full information as to which processors * are currently using which maps, and to when physical maps must be made * correct. */ #include "opt_compat.h" #include "opt_kstack_pages.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mmu_oea64.h" #include "mmu_if.h" #include "moea64_if.h" void moea64_release_vsid(uint64_t vsid); uintptr_t moea64_get_unique_vsid(void); #define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR) #define ENABLE_TRANS(msr) mtmsr(msr) #define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) #define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) #define VSID_HASH_MASK 0x0000007fffffffffULL /* * Locking semantics: * * There are two locks of interest: the page locks and the pmap locks, which * protect their individual PVO lists and are locked in that order. The contents * of all PVO entries are protected by the locks of their respective pmaps. * The pmap of any PVO is guaranteed not to change so long as the PVO is linked * into any list. 
* */ #define PV_LOCK_COUNT PA_LOCK_COUNT*3 static struct mtx_padalign pv_lock[PV_LOCK_COUNT]; #define PV_LOCKPTR(pa) ((struct mtx *)(&pv_lock[pa_index(pa) % PV_LOCK_COUNT])) #define PV_LOCK(pa) mtx_lock(PV_LOCKPTR(pa)) #define PV_UNLOCK(pa) mtx_unlock(PV_LOCKPTR(pa)) #define PV_LOCKASSERT(pa) mtx_assert(PV_LOCKPTR(pa), MA_OWNED) #define PV_PAGE_LOCK(m) PV_LOCK(VM_PAGE_TO_PHYS(m)) #define PV_PAGE_UNLOCK(m) PV_UNLOCK(VM_PAGE_TO_PHYS(m)) #define PV_PAGE_LOCKASSERT(m) PV_LOCKASSERT(VM_PAGE_TO_PHYS(m)) struct ofw_map { cell_t om_va; cell_t om_len; uint64_t om_pa; cell_t om_mode; }; extern unsigned char _etext[]; extern unsigned char _end[]; -extern int ofw_real_mode; - /* * Map of physical memory regions. */ static struct mem_region *regions; static struct mem_region *pregions; static u_int phys_avail_count; static int regions_sz, pregions_sz; extern void bs_remap_earlyboot(void); /* * Lock for the SLB tables. */ struct mtx moea64_slb_mutex; /* * PTEG data. */ u_int moea64_pteg_count; u_int moea64_pteg_mask; /* * PVO data. */ uma_zone_t moea64_pvo_zone; /* zone for pvo entries */ static struct pvo_entry *moea64_bpvo_pool; static int moea64_bpvo_pool_index = 0; static int moea64_bpvo_pool_size = 327680; TUNABLE_INT("machdep.moea64_bpvo_pool_size", &moea64_bpvo_pool_size); SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD, &moea64_bpvo_pool_index, 0, ""); #define VSID_NBPW (sizeof(u_int32_t) * 8) #ifdef __powerpc64__ #define NVSIDS (NPMAPS * 16) #define VSID_HASHMASK 0xffffffffUL #else #define NVSIDS NPMAPS #define VSID_HASHMASK 0xfffffUL #endif static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW]; static boolean_t moea64_initialized = FALSE; /* * Statistics. */ u_int moea64_pte_valid = 0; u_int moea64_pte_overflow = 0; u_int moea64_pvo_entries = 0; u_int moea64_pvo_enter_calls = 0; u_int moea64_pvo_remove_calls = 0; SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, &moea64_pte_valid, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD, &moea64_pte_overflow, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, &moea64_pvo_entries, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD, &moea64_pvo_enter_calls, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD, &moea64_pvo_remove_calls, 0, ""); vm_offset_t moea64_scratchpage_va[2]; struct pvo_entry *moea64_scratchpage_pvo[2]; struct mtx moea64_scratchpage_mtx; uint64_t moea64_large_page_mask = 0; uint64_t moea64_large_page_size = 0; int moea64_large_page_shift = 0; /* * PVO calls. */ static int moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, struct pvo_head *pvo_head); static void moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo); static void moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo); static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t); /* * Utility routines. 
*/ static boolean_t moea64_query_bit(mmu_t, vm_page_t, uint64_t); static u_int moea64_clear_bit(mmu_t, vm_page_t, uint64_t); static void moea64_kremove(mmu_t, vm_offset_t); static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz); /* * Kernel MMU interface */ void moea64_clear_modify(mmu_t, vm_page_t); void moea64_copy_page(mmu_t, vm_page_t, vm_page_t); void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, vm_page_t *mb, vm_offset_t b_offset, int xfersize); int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int flags, int8_t psind); void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, vm_prot_t); void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t); vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); void moea64_init(mmu_t); boolean_t moea64_is_modified(mmu_t, vm_page_t); boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t); boolean_t moea64_is_referenced(mmu_t, vm_page_t); int moea64_ts_referenced(mmu_t, vm_page_t); vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int); boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t); int moea64_page_wired_mappings(mmu_t, vm_page_t); void moea64_pinit(mmu_t, pmap_t); void moea64_pinit0(mmu_t, pmap_t); void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int); void moea64_qremove(mmu_t, vm_offset_t, int); void moea64_release(mmu_t, pmap_t); void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); void moea64_remove_pages(mmu_t, pmap_t); void moea64_remove_all(mmu_t, vm_page_t); void moea64_remove_write(mmu_t, vm_page_t); void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t); void moea64_zero_page(mmu_t, vm_page_t); void moea64_zero_page_area(mmu_t, vm_page_t, int, int); void moea64_zero_page_idle(mmu_t, vm_page_t); void moea64_activate(mmu_t, struct thread *); void moea64_deactivate(mmu_t, struct thread *); void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t); void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t); void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t); vm_paddr_t moea64_kextract(mmu_t, vm_offset_t); void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma); void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma); void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t); boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t); static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va); void moea64_scan_init(mmu_t mmu); static mmu_method_t moea64_methods[] = { MMUMETHOD(mmu_clear_modify, moea64_clear_modify), MMUMETHOD(mmu_copy_page, moea64_copy_page), MMUMETHOD(mmu_copy_pages, moea64_copy_pages), MMUMETHOD(mmu_enter, moea64_enter), MMUMETHOD(mmu_enter_object, moea64_enter_object), MMUMETHOD(mmu_enter_quick, moea64_enter_quick), MMUMETHOD(mmu_extract, moea64_extract), MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold), MMUMETHOD(mmu_init, moea64_init), MMUMETHOD(mmu_is_modified, moea64_is_modified), MMUMETHOD(mmu_is_prefaultable, moea64_is_prefaultable), MMUMETHOD(mmu_is_referenced, moea64_is_referenced), MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced), MMUMETHOD(mmu_map, moea64_map), MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick), 
MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings), MMUMETHOD(mmu_pinit, moea64_pinit), MMUMETHOD(mmu_pinit0, moea64_pinit0), MMUMETHOD(mmu_protect, moea64_protect), MMUMETHOD(mmu_qenter, moea64_qenter), MMUMETHOD(mmu_qremove, moea64_qremove), MMUMETHOD(mmu_release, moea64_release), MMUMETHOD(mmu_remove, moea64_remove), MMUMETHOD(mmu_remove_pages, moea64_remove_pages), MMUMETHOD(mmu_remove_all, moea64_remove_all), MMUMETHOD(mmu_remove_write, moea64_remove_write), MMUMETHOD(mmu_sync_icache, moea64_sync_icache), MMUMETHOD(mmu_unwire, moea64_unwire), MMUMETHOD(mmu_zero_page, moea64_zero_page), MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area), MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle), MMUMETHOD(mmu_activate, moea64_activate), MMUMETHOD(mmu_deactivate, moea64_deactivate), MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr), /* Internal interfaces */ MMUMETHOD(mmu_mapdev, moea64_mapdev), MMUMETHOD(mmu_mapdev_attr, moea64_mapdev_attr), MMUMETHOD(mmu_unmapdev, moea64_unmapdev), MMUMETHOD(mmu_kextract, moea64_kextract), MMUMETHOD(mmu_kenter, moea64_kenter), MMUMETHOD(mmu_kenter_attr, moea64_kenter_attr), MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped), MMUMETHOD(mmu_scan_init, moea64_scan_init), MMUMETHOD(mmu_dumpsys_map, moea64_dumpsys_map), { 0, 0 } }; MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0); static struct pvo_head * vm_page_to_pvoh(vm_page_t m) { mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED); return (&m->md.mdpg_pvoh); } static struct pvo_entry * alloc_pvo_entry(int bootstrap) { struct pvo_entry *pvo; if (!moea64_initialized || bootstrap) { if (moea64_bpvo_pool_index >= moea64_bpvo_pool_size) { panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd", moea64_bpvo_pool_index, moea64_bpvo_pool_size, moea64_bpvo_pool_size * sizeof(struct pvo_entry)); } pvo = &moea64_bpvo_pool[ atomic_fetchadd_int(&moea64_bpvo_pool_index, 1)]; bzero(pvo, sizeof(*pvo)); pvo->pvo_vaddr = PVO_BOOTSTRAP; } else { pvo = uma_zalloc(moea64_pvo_zone, M_NOWAIT); bzero(pvo, sizeof(*pvo)); } return (pvo); } static void init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va) { uint64_t vsid; uint64_t hash; int shift; PMAP_LOCK_ASSERT(pmap, MA_OWNED); pvo->pvo_pmap = pmap; va &= ~ADDR_POFF; pvo->pvo_vaddr |= va; vsid = va_to_vsid(pmap, va); pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT) | (vsid << 16); shift = (pvo->pvo_vaddr & PVO_LARGE) ? 
moea64_large_page_shift : ADDR_PIDX_SHFT; hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)va & ADDR_PIDX) >> shift); pvo->pvo_pte.slot = (hash & moea64_pteg_mask) << 3; } static void free_pvo_entry(struct pvo_entry *pvo) { if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) uma_zfree(moea64_pvo_zone, pvo); } void moea64_pte_from_pvo(const struct pvo_entry *pvo, struct lpte *lpte) { lpte->pte_hi = (pvo->pvo_vpn >> (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) & LPTE_AVPN_MASK; lpte->pte_hi |= LPTE_VALID; if (pvo->pvo_vaddr & PVO_LARGE) lpte->pte_hi |= LPTE_BIG; if (pvo->pvo_vaddr & PVO_WIRED) lpte->pte_hi |= LPTE_WIRED; if (pvo->pvo_vaddr & PVO_HID) lpte->pte_hi |= LPTE_HID; lpte->pte_lo = pvo->pvo_pte.pa; /* Includes WIMG bits */ if (pvo->pvo_pte.prot & VM_PROT_WRITE) lpte->pte_lo |= LPTE_BW; else lpte->pte_lo |= LPTE_BR; if (!(pvo->pvo_pte.prot & VM_PROT_EXECUTE)) lpte->pte_lo |= LPTE_NOEXEC; } static __inline uint64_t moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma) { uint64_t pte_lo; int i; if (ma != VM_MEMATTR_DEFAULT) { switch (ma) { case VM_MEMATTR_UNCACHEABLE: return (LPTE_I | LPTE_G); case VM_MEMATTR_WRITE_COMBINING: case VM_MEMATTR_WRITE_BACK: case VM_MEMATTR_PREFETCHABLE: return (LPTE_I); case VM_MEMATTR_WRITE_THROUGH: return (LPTE_W | LPTE_M); } } /* * Assume the page is cache inhibited and access is guarded unless * it's in our available memory array. */ pte_lo = LPTE_I | LPTE_G; for (i = 0; i < pregions_sz; i++) { if ((pa >= pregions[i].mr_start) && (pa < (pregions[i].mr_start + pregions[i].mr_size))) { pte_lo &= ~(LPTE_I | LPTE_G); pte_lo |= LPTE_M; break; } } return pte_lo; } /* * Quick sort callout for comparing memory regions. */ static int om_cmp(const void *a, const void *b); static int om_cmp(const void *a, const void *b) { const struct ofw_map *mapa; const struct ofw_map *mapb; mapa = a; mapb = b; if (mapa->om_pa < mapb->om_pa) return (-1); else if (mapa->om_pa > mapb->om_pa) return (1); else return (0); } static void moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz) { struct ofw_map translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */ pcell_t acells, trans_cells[sz/sizeof(cell_t)]; struct pvo_entry *pvo; register_t msr; vm_offset_t off; vm_paddr_t pa_base; int i, j; bzero(translations, sz); OF_getprop(OF_finddevice("/"), "#address-cells", &acells, sizeof(acells)); if (OF_getprop(mmu, "translations", trans_cells, sz) == -1) panic("moea64_bootstrap: can't get ofw translations"); CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations"); sz /= sizeof(cell_t); for (i = 0, j = 0; i < sz; j++) { translations[j].om_va = trans_cells[i++]; translations[j].om_len = trans_cells[i++]; translations[j].om_pa = trans_cells[i++]; if (acells == 2) { translations[j].om_pa <<= 32; translations[j].om_pa |= trans_cells[i++]; } translations[j].om_mode = trans_cells[i++]; } KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)", i, sz)); sz = j; qsort(translations, sz, sizeof (*translations), om_cmp); for (i = 0; i < sz; i++) { pa_base = translations[i].om_pa; #ifndef __powerpc64__ if ((translations[i].om_pa >> 32) != 0) panic("OFW translations above 32-bit boundary!"); #endif if (pa_base % PAGE_SIZE) panic("OFW translation not page-aligned (phys)!"); if (translations[i].om_va % PAGE_SIZE) panic("OFW translation not page-aligned (virt)!"); CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x", pa_base, translations[i].om_va, translations[i].om_len); /* Now enter the pages for this mapping */ DISABLE_TRANS(msr); for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { /* If this 
address is direct-mapped, skip remapping */ if (hw_direct_map && translations[i].om_va == pa_base && moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT) == LPTE_M) continue; PMAP_LOCK(kernel_pmap); pvo = moea64_pvo_find_va(kernel_pmap, translations[i].om_va + off); PMAP_UNLOCK(kernel_pmap); if (pvo != NULL) continue; moea64_kenter(mmup, translations[i].om_va + off, pa_base + off); } ENABLE_TRANS(msr); } } #ifdef __powerpc64__ static void moea64_probe_large_page(void) { uint16_t pvr = mfpvr() >> 16; switch (pvr) { case IBM970: case IBM970FX: case IBM970MP: powerpc_sync(); isync(); mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG); powerpc_sync(); isync(); /* FALLTHROUGH */ default: moea64_large_page_size = 0x1000000; /* 16 MB */ moea64_large_page_shift = 24; } moea64_large_page_mask = moea64_large_page_size - 1; } static void moea64_bootstrap_slb_prefault(vm_offset_t va, int large) { struct slb *cache; struct slb entry; uint64_t esid, slbe; uint64_t i; cache = PCPU_GET(slb); esid = va >> ADDR_SR_SHFT; slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID; for (i = 0; i < 64; i++) { if (cache[i].slbe == (slbe | i)) return; } entry.slbe = slbe; entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT; if (large) entry.slbv |= SLBV_L; slb_insert_kernel(entry.slbe, entry.slbv); } #endif static void moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) { struct pvo_entry *pvo; register_t msr; vm_paddr_t pa; vm_offset_t size, off; uint64_t pte_lo; int i; if (moea64_large_page_size == 0) hw_direct_map = 0; DISABLE_TRANS(msr); if (hw_direct_map) { PMAP_LOCK(kernel_pmap); for (i = 0; i < pregions_sz; i++) { for (pa = pregions[i].mr_start; pa < pregions[i].mr_start + pregions[i].mr_size; pa += moea64_large_page_size) { pte_lo = LPTE_M; pvo = alloc_pvo_entry(1 /* bootstrap */); pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE; init_pvo_entry(pvo, kernel_pmap, pa); /* * Set memory access as guarded if prefetch within * the page could exit the available physmem area. */ if (pa & moea64_large_page_mask) { pa &= moea64_large_page_mask; pte_lo |= LPTE_G; } if (pa + moea64_large_page_size > pregions[i].mr_start + pregions[i].mr_size) pte_lo |= LPTE_G; pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; pvo->pvo_pte.pa = pa | pte_lo; moea64_pvo_enter(mmup, pvo, NULL); } } PMAP_UNLOCK(kernel_pmap); } else { size = moea64_bpvo_pool_size*sizeof(struct pvo_entry); off = (vm_offset_t)(moea64_bpvo_pool); for (pa = off; pa < off + size; pa += PAGE_SIZE) moea64_kenter(mmup, pa, pa); /* * Map certain important things, like ourselves. * * NOTE: We do not map the exception vector space. That code is * used only in real mode, and leaving it unmapped allows us to * catch NULL pointer deferences, instead of making NULL a valid * address. */ for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; pa += PAGE_SIZE) moea64_kenter(mmup, pa, pa); } ENABLE_TRANS(msr); /* * Allow user to override unmapped_buf_allowed for testing. * XXXKIB Only direct map implementation was tested. 
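/*
 * Illustrative sketch of the guard-bit decision made in
 * moea64_setup_direct_map() above: a 16 MB direct mapping is marked guarded
 * (LPTE_G) whenever speculative prefetch inside the large page could run past
 * the physical region being mapped.  LARGE_PAGE_SIZE and the helper name are
 * stand-ins chosen for the sketch, not kernel interfaces.
 */
#include <stdbool.h>
#include <stdint.h>

#define LARGE_PAGE_SIZE 0x1000000ULL            /* 16 MB, as probed on 970s */
#define LARGE_PAGE_MASK (LARGE_PAGE_SIZE - 1)

/*
 * Return true if a large-page mapping starting at 'pa' could let the CPU
 * prefetch beyond the physical region [mr_start, mr_start + mr_size).
 */
static bool
direct_map_needs_guard(uint64_t pa, uint64_t mr_start, uint64_t mr_size)
{

        if (pa & LARGE_PAGE_MASK)               /* start is not 16 MB aligned */
                return (true);
        if (pa + LARGE_PAGE_SIZE > mr_start + mr_size)  /* runs off the end */
                return (true);
        return (false);
}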
*/ if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed", &unmapped_buf_allowed)) unmapped_buf_allowed = hw_direct_map; } void moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) { int i, j; vm_size_t physsz, hwphyssz; #ifndef __powerpc64__ /* We don't have a direct map since there is no BAT */ hw_direct_map = 0; /* Make sure battable is zero, since we have no BAT */ for (i = 0; i < 16; i++) { battable[i].batu = 0; battable[i].batl = 0; } #else moea64_probe_large_page(); /* Use a direct map if we have large page support */ if (moea64_large_page_size > 0) hw_direct_map = 1; else hw_direct_map = 0; #endif /* Get physical memory regions from firmware */ mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); CTR0(KTR_PMAP, "moea64_bootstrap: physical memory"); if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) panic("moea64_bootstrap: phys_avail too small"); phys_avail_count = 0; physsz = 0; hwphyssz = 0; TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); for (i = 0, j = 0; i < regions_sz; i++, j += 2) { CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)", regions[i].mr_start, regions[i].mr_start + regions[i].mr_size, regions[i].mr_size); if (hwphyssz != 0 && (physsz + regions[i].mr_size) >= hwphyssz) { if (physsz < hwphyssz) { phys_avail[j] = regions[i].mr_start; phys_avail[j + 1] = regions[i].mr_start + hwphyssz - physsz; physsz = hwphyssz; phys_avail_count++; } break; } phys_avail[j] = regions[i].mr_start; phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; phys_avail_count++; physsz += regions[i].mr_size; } /* Check for overlap with the kernel and exception vectors */ for (j = 0; j < 2*phys_avail_count; j+=2) { if (phys_avail[j] < EXC_LAST) phys_avail[j] += EXC_LAST; if (kernelstart >= phys_avail[j] && kernelstart < phys_avail[j+1]) { if (kernelend < phys_avail[j+1]) { phys_avail[2*phys_avail_count] = (kernelend & ~PAGE_MASK) + PAGE_SIZE; phys_avail[2*phys_avail_count + 1] = phys_avail[j+1]; phys_avail_count++; } phys_avail[j+1] = kernelstart & ~PAGE_MASK; } if (kernelend >= phys_avail[j] && kernelend < phys_avail[j+1]) { if (kernelstart > phys_avail[j]) { phys_avail[2*phys_avail_count] = phys_avail[j]; phys_avail[2*phys_avail_count + 1] = kernelstart & ~PAGE_MASK; phys_avail_count++; } phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE; } } physmem = btoc(physsz); #ifdef PTEGCOUNT moea64_pteg_count = PTEGCOUNT; #else moea64_pteg_count = 0x1000; while (moea64_pteg_count < physmem) moea64_pteg_count <<= 1; moea64_pteg_count >>= 1; #endif /* PTEGCOUNT */ } void moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) { int i; /* * Set PTEG mask */ moea64_pteg_mask = moea64_pteg_count - 1; /* * Initialize SLB table lock and page locks */ mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF); for (i = 0; i < PV_LOCK_COUNT; i++) mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF); /* * Initialise the bootstrap pvo pool. */ moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc( moea64_bpvo_pool_size*sizeof(struct pvo_entry), 0); moea64_bpvo_pool_index = 0; /* * Make sure kernel vsid is allocated as well as VSID 0. */ #ifndef __powerpc64__ moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW] |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); moea64_vsid_bitmap[0] |= 1; #endif /* * Initialize the kernel pmap (which is statically allocated). 
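/*
 * Illustrative sketch of the phys_avail surgery in moea64_early_bootstrap()
 * above: when the kernel image lands inside an available region, that region
 * is truncated and, if a tail remains past the kernel, the tail is appended
 * as a new start/end pair.  Only the kernelstart-inside-region case is shown;
 * the array layout (pairs terminated by zero) follows the kernel convention
 * and a 4 KB PAGE_SIZE is assumed here.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (PAGE_SIZE - 1)

static void
carve_out_kernel(uint64_t avail[], int *count, uint64_t kstart, uint64_t kend)
{
        int j;

        for (j = 0; j < 2 * *count; j += 2) {
                if (kstart >= avail[j] && kstart < avail[j + 1]) {
                        if (kend < avail[j + 1]) {
                                /* The tail survives as a new entry. */
                                avail[2 * *count] =
                                    (kend & ~PAGE_MASK) + PAGE_SIZE;
                                avail[2 * *count + 1] = avail[j + 1];
                                (*count)++;
                        }
                        avail[j + 1] = kstart & ~PAGE_MASK;
                }
        }
}

int
main(void)
{
        uint64_t avail[8] = { 0x0, 0x10000000, 0, 0 };
        int count = 1, i;

        carve_out_kernel(avail, &count, 0x100000, 0x400000);
        for (i = 0; i < 2 * count; i += 2)
                printf("%#jx - %#jx\n", (uintmax_t)avail[i],
                    (uintmax_t)avail[i + 1]);
        return (0);
}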
*/ #ifdef __powerpc64__ for (i = 0; i < 64; i++) { pcpup->pc_slb[i].slbv = 0; pcpup->pc_slb[i].slbe = 0; } #else for (i = 0; i < 16; i++) kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; #endif kernel_pmap->pmap_phys = kernel_pmap; CPU_FILL(&kernel_pmap->pm_active); RB_INIT(&kernel_pmap->pmap_pvo); PMAP_LOCK_INIT(kernel_pmap); /* * Now map in all the other buffers we allocated earlier */ moea64_setup_direct_map(mmup, kernelstart, kernelend); } void moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) { ihandle_t mmui; phandle_t chosen; phandle_t mmu; ssize_t sz; int i; vm_offset_t pa, va; void *dpcpu; /* * Set up the Open Firmware pmap and add its mappings if not in real * mode. */ chosen = OF_finddevice("/chosen"); - if (!ofw_real_mode && chosen != -1 && - OF_getprop(chosen, "mmu", &mmui, 4) != -1) { + if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) { mmu = OF_instance_to_package(mmui); if (mmu == -1 || (sz = OF_getproplen(mmu, "translations")) == -1) sz = 0; if (sz > 6144 /* tmpstksz - 2 KB headroom */) panic("moea64_bootstrap: too many ofw translations"); if (sz > 0) moea64_add_ofw_mappings(mmup, mmu, sz); } /* * Calculate the last available physical address. */ for (i = 0; phys_avail[i + 2] != 0; i += 2) ; Maxmem = powerpc_btop(phys_avail[i + 1]); /* * Initialize MMU and remap early physical mappings */ MMU_CPU_BOOTSTRAP(mmup,0); mtmsr(mfmsr() | PSL_DR | PSL_IR); pmap_bootstrapped++; bs_remap_earlyboot(); /* * Set the start and end of kva. */ virtual_avail = VM_MIN_KERNEL_ADDRESS; virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; /* * Map the entire KVA range into the SLB. We must not fault there. */ #ifdef __powerpc64__ for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH) moea64_bootstrap_slb_prefault(va, 0); #endif /* * Figure out how far we can extend virtual_end into segment 16 * without running into existing mappings. Segment 16 is guaranteed * to contain neither RAM nor devices (at least on Apple hardware), * but will generally contain some OFW mappings we should not * step on. */ #ifndef __powerpc64__ /* KVA is in high memory on PPC64 */ PMAP_LOCK(kernel_pmap); while (virtual_end < VM_MAX_KERNEL_ADDRESS && moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL) virtual_end += PAGE_SIZE; PMAP_UNLOCK(kernel_pmap); #endif /* * Allocate a kernel stack with a guard page for thread0 and map it * into the kernel page map. */ pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE); va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; virtual_avail = va + KSTACK_PAGES * PAGE_SIZE; CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va); thread0.td_kstack = va; thread0.td_kstack_pages = KSTACK_PAGES; for (i = 0; i < KSTACK_PAGES; i++) { moea64_kenter(mmup, va, pa); pa += PAGE_SIZE; va += PAGE_SIZE; } /* * Allocate virtual address space for the message buffer. */ pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE); msgbufp = (struct msgbuf *)virtual_avail; va = virtual_avail; virtual_avail += round_page(msgbufsize); while (va < virtual_avail) { moea64_kenter(mmup, va, pa); pa += PAGE_SIZE; va += PAGE_SIZE; } /* * Allocate virtual address space for the dynamic percpu area. */ pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); dpcpu = (void *)virtual_avail; va = virtual_avail; virtual_avail += DPCPU_SIZE; while (va < virtual_avail) { moea64_kenter(mmup, va, pa); pa += PAGE_SIZE; va += PAGE_SIZE; } dpcpu_init(dpcpu, 0); /* * Allocate some things for page zeroing. 
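/*
 * Illustrative sketch of the bump-allocation pattern moea64_late_bootstrap()
 * above uses for the message buffer and the per-CPU area: advance
 * virtual_avail by a page-rounded size, then wire each page of the window.
 * kenter_page() is a placeholder for moea64_kenter(); the starting KVA value
 * is made up for the sketch.
 */
#include <stdint.h>

#define PAGE_SIZE       4096UL

static uint64_t virtual_avail = 0xe0000000UL;   /* illustrative KVA cursor */

static void
kenter_page(uint64_t va, uint64_t pa)
{

        /* The real code enters a wired kernel mapping for va -> pa here. */
        (void)va;
        (void)pa;
}

static uint64_t
bootstrap_map(uint64_t pa, uint64_t size)
{
        uint64_t va, start;

        start = virtual_avail;
        virtual_avail += (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
        for (va = start; va < virtual_avail; va += PAGE_SIZE, pa += PAGE_SIZE)
                kenter_page(va, pa);
        return (start);
}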
We put this directly * in the page table and use MOEA64_PTE_REPLACE to avoid any * of the PVO book-keeping or other parts of the VM system * from even knowing that this hack exists. */ if (!hw_direct_map) { mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, MTX_DEF); for (i = 0; i < 2; i++) { moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE; virtual_end -= PAGE_SIZE; moea64_kenter(mmup, moea64_scratchpage_va[i], 0); PMAP_LOCK(kernel_pmap); moea64_scratchpage_pvo[i] = moea64_pvo_find_va( kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]); PMAP_UNLOCK(kernel_pmap); } } } /* * Activate a user pmap. This mostly involves setting some non-CPU * state. */ void moea64_activate(mmu_t mmu, struct thread *td) { pmap_t pm; pm = &td->td_proc->p_vmspace->vm_pmap; CPU_SET(PCPU_GET(cpuid), &pm->pm_active); #ifdef __powerpc64__ PCPU_SET(userslb, pm->pm_slb); __asm __volatile("slbmte %0, %1; isync" :: "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE)); #else PCPU_SET(curpmap, pm->pmap_phys); mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid); #endif } void moea64_deactivate(mmu_t mmu, struct thread *td) { pmap_t pm; __asm __volatile("isync; slbie %0" :: "r"(USER_ADDR)); pm = &td->td_proc->p_vmspace->vm_pmap; CPU_CLR(PCPU_GET(cpuid), &pm->pm_active); #ifdef __powerpc64__ PCPU_SET(userslb, NULL); #else PCPU_SET(curpmap, NULL); #endif } void moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) { struct pvo_entry key, *pvo; vm_page_t m; int64_t refchg; key.pvo_vaddr = sva; PMAP_LOCK(pm); for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); pvo != NULL && PVO_VADDR(pvo) < eva; pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { if ((pvo->pvo_vaddr & PVO_WIRED) == 0) panic("moea64_unwire: pvo %p is missing PVO_WIRED", pvo); pvo->pvo_vaddr &= ~PVO_WIRED; refchg = MOEA64_PTE_REPLACE(mmu, pvo, 0 /* No invalidation */); if ((pvo->pvo_vaddr & PVO_MANAGED) && (pvo->pvo_pte.prot & VM_PROT_WRITE)) { if (refchg < 0) refchg = LPTE_CHG; m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); refchg |= atomic_readandclear_32(&m->md.mdpg_attrs); if (refchg & LPTE_CHG) vm_page_dirty(m); if (refchg & LPTE_REF) vm_page_aflag_set(m, PGA_REFERENCED); } pm->pm_stats.wired_count--; } PMAP_UNLOCK(pm); } /* * This goes through and sets the physical address of our * special scratch PTE to the PA we want to zero or copy. 
Because * of locking issues (this can get called in pvo_enter() by * the UMA allocator), we can't use most other utility functions here */ static __inline void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) { KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!")); mtx_assert(&moea64_scratchpage_mtx, MA_OWNED); moea64_scratchpage_pvo[which]->pvo_pte.pa = moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa; MOEA64_PTE_REPLACE(mmup, moea64_scratchpage_pvo[which], MOEA64_PTE_INVALIDATE); isync(); } void moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) { vm_offset_t dst; vm_offset_t src; dst = VM_PAGE_TO_PHYS(mdst); src = VM_PAGE_TO_PHYS(msrc); if (hw_direct_map) { bcopy((void *)src, (void *)dst, PAGE_SIZE); } else { mtx_lock(&moea64_scratchpage_mtx); moea64_set_scratchpage_pa(mmu, 0, src); moea64_set_scratchpage_pa(mmu, 1, dst); bcopy((void *)moea64_scratchpage_va[0], (void *)moea64_scratchpage_va[1], PAGE_SIZE); mtx_unlock(&moea64_scratchpage_mtx); } } static inline void moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, vm_page_t *mb, vm_offset_t b_offset, int xfersize) { void *a_cp, *b_cp; vm_offset_t a_pg_offset, b_pg_offset; int cnt; while (xfersize > 0) { a_pg_offset = a_offset & PAGE_MASK; cnt = min(xfersize, PAGE_SIZE - a_pg_offset); a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) + a_pg_offset; b_pg_offset = b_offset & PAGE_MASK; cnt = min(cnt, PAGE_SIZE - b_pg_offset); b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) + b_pg_offset; bcopy(a_cp, b_cp, cnt); a_offset += cnt; b_offset += cnt; xfersize -= cnt; } } static inline void moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, vm_page_t *mb, vm_offset_t b_offset, int xfersize) { void *a_cp, *b_cp; vm_offset_t a_pg_offset, b_pg_offset; int cnt; mtx_lock(&moea64_scratchpage_mtx); while (xfersize > 0) { a_pg_offset = a_offset & PAGE_MASK; cnt = min(xfersize, PAGE_SIZE - a_pg_offset); moea64_set_scratchpage_pa(mmu, 0, VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])); a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset; b_pg_offset = b_offset & PAGE_MASK; cnt = min(cnt, PAGE_SIZE - b_pg_offset); moea64_set_scratchpage_pa(mmu, 1, VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])); b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset; bcopy(a_cp, b_cp, cnt); a_offset += cnt; b_offset += cnt; xfersize -= cnt; } mtx_unlock(&moea64_scratchpage_mtx); } void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, vm_page_t *mb, vm_offset_t b_offset, int xfersize) { if (hw_direct_map) { moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset, xfersize); } else { moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset, xfersize); } } void moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) { vm_offset_t pa = VM_PAGE_TO_PHYS(m); if (size + off > PAGE_SIZE) panic("moea64_zero_page: size + off > PAGE_SIZE"); if (hw_direct_map) { bzero((caddr_t)pa + off, size); } else { mtx_lock(&moea64_scratchpage_mtx); moea64_set_scratchpage_pa(mmu, 0, pa); bzero((caddr_t)moea64_scratchpage_va[0] + off, size); mtx_unlock(&moea64_scratchpage_mtx); } } /* * Zero a page of physical memory by temporarily mapping it */ void moea64_zero_page(mmu_t mmu, vm_page_t m) { vm_offset_t pa = VM_PAGE_TO_PHYS(m); vm_offset_t va, off; if (!hw_direct_map) { mtx_lock(&moea64_scratchpage_mtx); moea64_set_scratchpage_pa(mmu, 0, pa); va = moea64_scratchpage_va[0]; } else { va = pa; } for (off = 0; off < PAGE_SIZE; off += cacheline_size) __asm __volatile("dcbz 
0,%0" :: "r"(va + off)); if (!hw_direct_map) mtx_unlock(&moea64_scratchpage_mtx); } void moea64_zero_page_idle(mmu_t mmu, vm_page_t m) { moea64_zero_page(mmu, m); } /* * Map the given physical page at the specified virtual address in the * target pmap with the protection requested. If specified the page * will be wired down. */ int moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, u_int flags, int8_t psind) { struct pvo_entry *pvo, *oldpvo; struct pvo_head *pvo_head; uint64_t pte_lo; int error; if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) VM_OBJECT_ASSERT_LOCKED(m->object); pvo = alloc_pvo_entry(0); pvo->pvo_pmap = NULL; /* to be filled in later */ pvo->pvo_pte.prot = prot; pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m)); pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | pte_lo; if ((flags & PMAP_ENTER_WIRED) != 0) pvo->pvo_vaddr |= PVO_WIRED; if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) { pvo_head = NULL; } else { pvo_head = &m->md.mdpg_pvoh; pvo->pvo_vaddr |= PVO_MANAGED; } for (;;) { PV_PAGE_LOCK(m); PMAP_LOCK(pmap); if (pvo->pvo_pmap == NULL) init_pvo_entry(pvo, pmap, va); if (prot & VM_PROT_WRITE) if (pmap_bootstrapped && (m->oflags & VPO_UNMANAGED) == 0) vm_page_aflag_set(m, PGA_WRITEABLE); oldpvo = moea64_pvo_find_va(pmap, va); if (oldpvo != NULL) { if (oldpvo->pvo_vaddr == pvo->pvo_vaddr && oldpvo->pvo_pte.pa == pvo->pvo_pte.pa && oldpvo->pvo_pte.prot == prot) { /* Identical mapping already exists */ error = 0; /* If not in page table, reinsert it */ if (MOEA64_PTE_SYNCH(mmu, oldpvo) < 0) { moea64_pte_overflow--; MOEA64_PTE_INSERT(mmu, oldpvo); } /* Then just clean up and go home */ PV_PAGE_UNLOCK(m); PMAP_UNLOCK(pmap); free_pvo_entry(pvo); break; } /* Otherwise, need to kill it first */ KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old " "mapping does not match new mapping")); moea64_pvo_remove_from_pmap(mmu, oldpvo); } error = moea64_pvo_enter(mmu, pvo, pvo_head); PV_PAGE_UNLOCK(m); PMAP_UNLOCK(pmap); /* Free any dead pages */ if (oldpvo != NULL) { PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN); moea64_pvo_remove_from_page(mmu, oldpvo); PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN); free_pvo_entry(oldpvo); } if (error != ENOMEM) break; if ((flags & PMAP_ENTER_NOSLEEP) != 0) return (KERN_RESOURCE_SHORTAGE); VM_OBJECT_ASSERT_UNLOCKED(m->object); VM_WAIT; } /* * Flush the page from the instruction cache if this page is * mapped executable and cacheable. */ if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) && (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { vm_page_aflag_set(m, PGA_EXECUTABLE); moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); } return (KERN_SUCCESS); } static void moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz) { /* * This is much trickier than on older systems because * we can't sync the icache on physical addresses directly * without a direct map. Instead we check a couple of cases * where the memory is already mapped in and, failing that, * use the same trick we use for page zeroing to create * a temporary mapping for this physical address. */ if (!pmap_bootstrapped) { /* * If PMAP is not bootstrapped, we are likely to be * in real mode. 
*/ __syncicache((void *)pa, sz); } else if (pmap == kernel_pmap) { __syncicache((void *)va, sz); } else if (hw_direct_map) { __syncicache((void *)pa, sz); } else { /* Use the scratch page to set up a temp mapping */ mtx_lock(&moea64_scratchpage_mtx); moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF); __syncicache((void *)(moea64_scratchpage_va[1] + (va & ADDR_POFF)), sz); mtx_unlock(&moea64_scratchpage_mtx); } } /* * Maps a sequence of resident pages belonging to the same object. * The sequence begins with the given page m_start. This page is * mapped at the given virtual address start. Each subsequent page is * mapped at a virtual address that is offset from start by the same * amount as the page is offset from m_start within the object. The * last page in the sequence is the page with the largest offset from * m_start that can be mapped at a virtual address less than the given * virtual address end. Not every virtual page between start and end * is mapped; only those for which a resident page exists with the * corresponding offset from m_start are mapped. */ void moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { vm_page_t m; vm_pindex_t diff, psize; VM_OBJECT_ASSERT_LOCKED(m_start->object); psize = atop(end - start); m = m_start; while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { moea64_enter(mmu, pm, start + ptoa(diff), m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0); m = TAILQ_NEXT(m, listq); } } void moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot) { moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0); } vm_paddr_t moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) { struct pvo_entry *pvo; vm_paddr_t pa; PMAP_LOCK(pm); pvo = moea64_pvo_find_va(pm, va); if (pvo == NULL) pa = 0; else pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo)); PMAP_UNLOCK(pm); return (pa); } /* * Atomically extract and hold the physical page with the given * pmap and virtual address pair if that mapping permits the given * protection. */ vm_page_t moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) { struct pvo_entry *pvo; vm_page_t m; vm_paddr_t pa; m = NULL; pa = 0; PMAP_LOCK(pmap); retry: pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) { if (vm_page_pa_tryrelock(pmap, pvo->pvo_pte.pa & LPTE_RPGN, &pa)) goto retry; m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); vm_page_hold(m); } PA_UNLOCK_COND(pa); PMAP_UNLOCK(pmap); return (m); } static mmu_t installed_mmu; static void * moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait) { struct pvo_entry *pvo; vm_offset_t va; vm_page_t m; int pflags, needed_lock; /* * This entire routine is a horrible hack to avoid bothering kmem * for new KVA addresses. Because this can get called from inside * kmem allocation routines, calling kmem for a new address here * can lead to multiply locking non-recursive mutexes. 
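/*
 * Illustrative sketch of the address selection in moea64_syncicache() above:
 * the routine must pick an address the kernel can actually dereference before
 * flushing the instruction cache.  sync_window() stands in for the temporary
 * scratch-page mapping; none of these names are kernel interfaces.
 */
#include <stdbool.h>
#include <stdint.h>

static uint64_t
sync_window(uint64_t pa)
{

        /* Placeholder: the real code maps pa at a reserved scratch VA. */
        return (pa);
}

static uint64_t
pick_sync_address(bool bootstrapped, bool is_kernel_pmap, bool direct_map,
    uint64_t va, uint64_t pa)
{

        if (!bootstrapped)              /* still in real mode: pa works */
                return (pa);
        if (is_kernel_pmap)             /* the kernel va is already mapped */
                return (va);
        if (direct_map)                 /* 1:1 map covers physical memory */
                return (pa);
        return (sync_window(pa));       /* otherwise borrow the scratch page */
}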
*/ *flags = UMA_SLAB_PRIV; needed_lock = !PMAP_LOCKED(kernel_pmap); pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED; for (;;) { m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ); if (m == NULL) { if (wait & M_NOWAIT) return (NULL); VM_WAIT; } else break; } va = VM_PAGE_TO_PHYS(m); pvo = alloc_pvo_entry(1 /* bootstrap */); pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE; pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M; if (needed_lock) PMAP_LOCK(kernel_pmap); init_pvo_entry(pvo, kernel_pmap, va); pvo->pvo_vaddr |= PVO_WIRED; moea64_pvo_enter(installed_mmu, pvo, NULL); if (needed_lock) PMAP_UNLOCK(kernel_pmap); if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) bzero((void *)va, PAGE_SIZE); return (void *)va; } extern int elf32_nxstack; void moea64_init(mmu_t mmu) { CTR0(KTR_PMAP, "moea64_init"); moea64_pvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); if (!hw_direct_map) { installed_mmu = mmu; uma_zone_set_allocf(moea64_pvo_zone,moea64_uma_page_alloc); } #ifdef COMPAT_FREEBSD32 elf32_nxstack = 1; #endif moea64_initialized = TRUE; } boolean_t moea64_is_referenced(mmu_t mmu, vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("moea64_is_referenced: page %p is not managed", m)); return (moea64_query_bit(mmu, m, LPTE_REF)); } boolean_t moea64_is_modified(mmu_t mmu, vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("moea64_is_modified: page %p is not managed", m)); /* * If the page is not exclusive busied, then PGA_WRITEABLE cannot be * concurrently set while the object is locked. Thus, if PGA_WRITEABLE * is clear, no PTEs can have LPTE_CHG set. */ VM_OBJECT_ASSERT_LOCKED(m->object); if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) return (FALSE); return (moea64_query_bit(mmu, m, LPTE_CHG)); } boolean_t moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va) { struct pvo_entry *pvo; boolean_t rv = TRUE; PMAP_LOCK(pmap); pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); if (pvo != NULL) rv = FALSE; PMAP_UNLOCK(pmap); return (rv); } void moea64_clear_modify(mmu_t mmu, vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("moea64_clear_modify: page %p is not managed", m)); VM_OBJECT_ASSERT_WLOCKED(m->object); KASSERT(!vm_page_xbusied(m), ("moea64_clear_modify: page %p is exclusive busied", m)); /* * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG * set. If the object containing the page is locked and the page is * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set. */ if ((m->aflags & PGA_WRITEABLE) == 0) return; moea64_clear_bit(mmu, m, LPTE_CHG); } /* * Clear the write and modified bits in each of the given page's mappings. */ void moea64_remove_write(mmu_t mmu, vm_page_t m) { struct pvo_entry *pvo; int64_t refchg, ret; pmap_t pmap; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("moea64_remove_write: page %p is not managed", m)); /* * If the page is not exclusive busied, then PGA_WRITEABLE cannot be * set by another thread while the object is locked. Thus, * if PGA_WRITEABLE is clear, no page table entries need updating. 
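/*
 * Illustrative sketch of the needed_lock pattern in moea64_uma_page_alloc()
 * above: because the allocator can be entered with the kernel pmap lock
 * already held, it only takes (and later drops) the lock when the caller did
 * not.  This userland analogue tracks ownership by hand; the kernel simply
 * asks PMAP_LOCKED().
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t kp_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t kp_owner;
static bool kp_locked;

static bool
kp_lock_held(void)
{

        return (kp_locked && pthread_equal(kp_owner, pthread_self()));
}

static void
kp_acquire(void)
{

        pthread_mutex_lock(&kp_lock);
        kp_owner = pthread_self();
        kp_locked = true;
}

static void
kp_release(void)
{

        kp_locked = false;
        pthread_mutex_unlock(&kp_lock);
}

static void
allocate_backing_page(void)
{
        bool needed_lock;

        needed_lock = !kp_lock_held();
        if (needed_lock)
                kp_acquire();
        /* ... build and insert the wired kernel mapping here ... */
        if (needed_lock)
                kp_release();
}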
*/ VM_OBJECT_ASSERT_WLOCKED(m->object); if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) return; powerpc_sync(); PV_PAGE_LOCK(m); refchg = 0; LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { pmap = pvo->pvo_pmap; PMAP_LOCK(pmap); if (!(pvo->pvo_vaddr & PVO_DEAD) && (pvo->pvo_pte.prot & VM_PROT_WRITE)) { pvo->pvo_pte.prot &= ~VM_PROT_WRITE; ret = MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_PROT_UPDATE); if (ret < 0) ret = LPTE_CHG; refchg |= ret; if (pvo->pvo_pmap == kernel_pmap) isync(); } PMAP_UNLOCK(pmap); } if ((refchg | atomic_readandclear_32(&m->md.mdpg_attrs)) & LPTE_CHG) vm_page_dirty(m); vm_page_aflag_clear(m, PGA_WRITEABLE); PV_PAGE_UNLOCK(m); } /* * moea64_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * XXX: The exact number of bits to check and clear is a matter that * should be tested and standardized at some point in the future for * optimal aging of shared pages. */ int moea64_ts_referenced(mmu_t mmu, vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("moea64_ts_referenced: page %p is not managed", m)); return (moea64_clear_bit(mmu, m, LPTE_REF)); } /* * Modify the WIMG settings of all mappings for a page. */ void moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma) { struct pvo_entry *pvo; int64_t refchg; pmap_t pmap; uint64_t lo; if ((m->oflags & VPO_UNMANAGED) != 0) { m->md.mdpg_cache_attrs = ma; return; } lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma); PV_PAGE_LOCK(m); LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { pmap = pvo->pvo_pmap; PMAP_LOCK(pmap); if (!(pvo->pvo_vaddr & PVO_DEAD)) { pvo->pvo_pte.pa &= ~LPTE_WIMG; pvo->pvo_pte.pa |= lo; refchg = MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_INVALIDATE); if (refchg < 0) refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ? LPTE_CHG : 0; if ((pvo->pvo_vaddr & PVO_MANAGED) && (pvo->pvo_pte.prot & VM_PROT_WRITE)) { refchg |= atomic_readandclear_32(&m->md.mdpg_attrs); if (refchg & LPTE_CHG) vm_page_dirty(m); if (refchg & LPTE_REF) vm_page_aflag_set(m, PGA_REFERENCED); } if (pvo->pvo_pmap == kernel_pmap) isync(); } PMAP_UNLOCK(pmap); } m->md.mdpg_cache_attrs = ma; PV_PAGE_UNLOCK(m); } /* * Map a wired page into kernel virtual address space. */ void moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma) { int error; struct pvo_entry *pvo, *oldpvo; pvo = alloc_pvo_entry(0); pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE; pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma); pvo->pvo_vaddr |= PVO_WIRED; PMAP_LOCK(kernel_pmap); oldpvo = moea64_pvo_find_va(kernel_pmap, va); if (oldpvo != NULL) moea64_pvo_remove_from_pmap(mmu, oldpvo); init_pvo_entry(pvo, kernel_pmap, va); error = moea64_pvo_enter(mmu, pvo, NULL); PMAP_UNLOCK(kernel_pmap); /* Free any dead pages */ if (oldpvo != NULL) { PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN); moea64_pvo_remove_from_page(mmu, oldpvo); PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN); free_pvo_entry(oldpvo); } if (error != 0 && error != ENOENT) panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va, pa, error); } void moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) { moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); } /* * Extract the physical page address associated with the given kernel virtual * address. 
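/*
 * Illustrative sketch of the REF/CHG accounting used by moea64_remove_write()
 * and moea64_unwire() above: bits harvested from the hardware PTE are folded
 * together with bits already latched on the page, and a negative return from
 * the pte-replace hook (the mapping had been evicted) is treated
 * pessimistically as "changed".  The SKETCH_LPTE_* values and the struct
 * layout are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_LPTE_CHG 0x080
#define SKETCH_LPTE_REF 0x100

struct page_md {
        uint32_t mdpg_attrs;            /* latched REF/CHG bits */
        bool dirty;
        bool referenced;
};

static void
fold_refchg(struct page_md *md, int64_t refchg_from_pte)
{
        uint32_t bits;

        if (refchg_from_pte < 0)        /* PTE was evicted; assume modified */
                refchg_from_pte = SKETCH_LPTE_CHG;
        bits = (uint32_t)refchg_from_pte | md->mdpg_attrs;
        md->mdpg_attrs = 0;
        if (bits & SKETCH_LPTE_CHG)
                md->dirty = true;
        if (bits & SKETCH_LPTE_REF)
                md->referenced = true;
}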
*/ vm_paddr_t moea64_kextract(mmu_t mmu, vm_offset_t va) { struct pvo_entry *pvo; vm_paddr_t pa; /* * Shortcut the direct-mapped case when applicable. We never put * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS. */ if (va < VM_MIN_KERNEL_ADDRESS) return (va); PMAP_LOCK(kernel_pmap); pvo = moea64_pvo_find_va(kernel_pmap, va); KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR, va)); pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo)); PMAP_UNLOCK(kernel_pmap); return (pa); } /* * Remove a wired page from kernel virtual address space. */ void moea64_kremove(mmu_t mmu, vm_offset_t va) { moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); } /* * Map a range of physical addresses into kernel virtual address space. * * The value passed in *virt is a suggested virtual address for the mapping. * Architectures which can support a direct-mapped physical to virtual region * can return the appropriate address within that region, leaving '*virt' * unchanged. Other architectures should map the pages starting at '*virt' and * update '*virt' with the first usable address after the mapped region. */ vm_offset_t moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start, vm_paddr_t pa_end, int prot) { vm_offset_t sva, va; if (hw_direct_map) { /* * Check if every page in the region is covered by the direct * map. The direct map covers all of physical memory. Use * moea64_calc_wimg() as a shortcut to see if the page is in * physical memory as a way to see if the direct map covers it. */ for (va = pa_start; va < pa_end; va += PAGE_SIZE) if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M) break; if (va == pa_end) return (pa_start); } sva = *virt; va = sva; /* XXX respect prot argument */ for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) moea64_kenter(mmu, va, pa_start); *virt = va; return (sva); } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ boolean_t moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) { int loops; struct pvo_entry *pvo; boolean_t rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("moea64_page_exists_quick: page %p is not managed", m)); loops = 0; rv = FALSE; PV_PAGE_LOCK(m); LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) { rv = TRUE; break; } if (++loops >= 16) break; } PV_PAGE_UNLOCK(m); return (rv); } /* * Return the number of managed mappings to the given physical page * that are wired. */ int moea64_page_wired_mappings(mmu_t mmu, vm_page_t m) { struct pvo_entry *pvo; int count; count = 0; if ((m->oflags & VPO_UNMANAGED) != 0) return (count); PV_PAGE_LOCK(m); LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED) count++; PV_PAGE_UNLOCK(m); return (count); } static uintptr_t moea64_vsidcontext; uintptr_t moea64_get_unique_vsid(void) { u_int entropy; register_t hash; uint32_t mask; int i; entropy = 0; __asm __volatile("mftb %0" : "=r"(entropy)); mtx_lock(&moea64_slb_mutex); for (i = 0; i < NVSIDS; i += VSID_NBPW) { u_int n; /* * Create a new value by mutiplying by a prime and adding in * entropy from the timebase register. This is to make the * VSID more random so that the PT hash function collides * less often. (Note that the prime casues gcc to do shifts * instead of a multiply.) 
*/ moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy; hash = moea64_vsidcontext & (NVSIDS - 1); if (hash == 0) /* 0 is special, avoid it */ continue; n = hash >> 5; mask = 1 << (hash & (VSID_NBPW - 1)); hash = (moea64_vsidcontext & VSID_HASHMASK); if (moea64_vsid_bitmap[n] & mask) { /* collision? */ /* anything free in this bucket? */ if (moea64_vsid_bitmap[n] == 0xffffffff) { entropy = (moea64_vsidcontext >> 20); continue; } i = ffs(~moea64_vsid_bitmap[n]) - 1; mask = 1 << i; hash &= VSID_HASHMASK & ~(VSID_NBPW - 1); hash |= i; } if (hash == VSID_VRMA) /* also special, avoid this too */ continue; KASSERT(!(moea64_vsid_bitmap[n] & mask), ("Allocating in-use VSID %#zx\n", hash)); moea64_vsid_bitmap[n] |= mask; mtx_unlock(&moea64_slb_mutex); return (hash); } mtx_unlock(&moea64_slb_mutex); panic("%s: out of segments",__func__); } #ifdef __powerpc64__ void moea64_pinit(mmu_t mmu, pmap_t pmap) { RB_INIT(&pmap->pmap_pvo); pmap->pm_slb_tree_root = slb_alloc_tree(); pmap->pm_slb = slb_alloc_user_cache(); pmap->pm_slb_len = 0; } #else void moea64_pinit(mmu_t mmu, pmap_t pmap) { int i; uint32_t hash; RB_INIT(&pmap->pmap_pvo); if (pmap_bootstrapped) pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, (vm_offset_t)pmap); else pmap->pmap_phys = pmap; /* * Allocate some segment registers for this pmap. */ hash = moea64_get_unique_vsid(); for (i = 0; i < 16; i++) pmap->pm_sr[i] = VSID_MAKE(i, hash); KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0")); } #endif /* * Initialize the pmap associated with process 0. */ void moea64_pinit0(mmu_t mmu, pmap_t pm) { PMAP_LOCK_INIT(pm); moea64_pinit(mmu, pm); bzero(&pm->pm_stats, sizeof(pm->pm_stats)); } /* * Set the physical protection on the specified range of this map as requested. */ static void moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot) { struct vm_page *pg; vm_prot_t oldprot; int32_t refchg; PMAP_LOCK_ASSERT(pm, MA_OWNED); /* * Change the protection of the page. */ oldprot = pvo->pvo_pte.prot; pvo->pvo_pte.prot = prot; pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); /* * If the PVO is in the page table, update mapping */ refchg = MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_PROT_UPDATE); if (refchg < 0) refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0; if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) && (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { if ((pg->oflags & VPO_UNMANAGED) == 0) vm_page_aflag_set(pg, PGA_EXECUTABLE); moea64_syncicache(mmu, pm, PVO_VADDR(pvo), pvo->pvo_pte.pa & LPTE_RPGN, PAGE_SIZE); } /* * Update vm about the REF/CHG bits if the page is managed and we have * removed write access. 
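/*
 * Illustrative sketch of the bitmap probe inside moea64_get_unique_vsid()
 * above: the hashed VSID selects a bit in a 32-bit bucket and, on a
 * collision, the allocator falls back to the first free bit of that bucket
 * via ffs().  The bucket width and the helper name are assumptions of the
 * sketch.
 */
#include <stdint.h>
#include <strings.h>                    /* ffs() */

#define VSID_BITS_PER_WORD      32

/*
 * Claim a VSID bit in the bucket selected by 'hash'; returns the bit index
 * claimed, or -1 when the bucket is already full.
 */
static int
claim_vsid_bit(uint32_t *bucket, uint32_t hash)
{
        uint32_t mask;
        int bit;

        bit = hash & (VSID_BITS_PER_WORD - 1);
        mask = 1u << bit;
        if (*bucket & mask) {                   /* collision */
                if (*bucket == 0xffffffffu)     /* bucket exhausted */
                        return (-1);
                bit = ffs((int)~*bucket) - 1;   /* first free bit instead */
                mask = 1u << bit;
        }
        *bucket |= mask;
        return (bit);
}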
*/ if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) && (oldprot & VM_PROT_WRITE)) { refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs); if (refchg & LPTE_CHG) vm_page_dirty(pg); if (refchg & LPTE_REF) vm_page_aflag_set(pg, PGA_REFERENCED); } } void moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { struct pvo_entry *pvo, *tpvo, key; CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, eva, prot); KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("moea64_protect: non current pmap")); if ((prot & VM_PROT_READ) == VM_PROT_NONE) { moea64_remove(mmu, pm, sva, eva); return; } PMAP_LOCK(pm); key.pvo_vaddr = sva; for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); moea64_pvo_protect(mmu, pm, pvo, prot); } PMAP_UNLOCK(pm); } /* * Map a list of wired pages into kernel virtual address space. This is * intended for temporary mappings which do not need page modification or * references recorded. Existing mappings in the region are overwritten. */ void moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count) { while (count-- > 0) { moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); va += PAGE_SIZE; m++; } } /* * Remove page mappings from kernel virtual address space. Intended for * temporary mappings entered by moea64_qenter. */ void moea64_qremove(mmu_t mmu, vm_offset_t va, int count) { while (count-- > 0) { moea64_kremove(mmu, va); va += PAGE_SIZE; } } void moea64_release_vsid(uint64_t vsid) { int idx, mask; mtx_lock(&moea64_slb_mutex); idx = vsid & (NVSIDS-1); mask = 1 << (idx % VSID_NBPW); idx /= VSID_NBPW; KASSERT(moea64_vsid_bitmap[idx] & mask, ("Freeing unallocated VSID %#jx", vsid)); moea64_vsid_bitmap[idx] &= ~mask; mtx_unlock(&moea64_slb_mutex); } void moea64_release(mmu_t mmu, pmap_t pmap) { /* * Free segment registers' VSIDs */ #ifdef __powerpc64__ slb_free_tree(pmap); slb_free_user_cache(pmap->pm_slb); #else KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0")); moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0])); #endif } /* * Remove all pages mapped by the specified pmap */ void moea64_remove_pages(mmu_t mmu, pmap_t pm) { struct pvo_entry *pvo, *tpvo; struct pvo_tree tofree; RB_INIT(&tofree); PMAP_LOCK(pm); RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) { if (pvo->pvo_vaddr & PVO_WIRED) continue; /* * For locking reasons, remove this from the page table and * pmap, but save delinking from the vm_page for a second * pass */ moea64_pvo_remove_from_pmap(mmu, pvo); RB_INSERT(pvo_tree, &tofree, pvo); } PMAP_UNLOCK(pm); RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) { PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN); moea64_pvo_remove_from_page(mmu, pvo); PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN); RB_REMOVE(pvo_tree, &tofree, pvo); free_pvo_entry(pvo); } } /* * Remove the given range of addresses from the specified map. */ void moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) { struct pvo_entry *pvo, *tpvo, key; struct pvo_tree tofree; /* * Perform an unsynchronized read. This is, however, safe. 
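/*
 * Illustrative sketch of the two-pass teardown used by moea64_remove_pages()
 * above and moea64_remove() just below: mappings are unlinked from the pmap
 * while its lock is held, queued, and only delinked from their pages (under
 * the per-page lock) in a second pass.  The list type and the lock stubs
 * exist only for the sketch.
 */
#include <stddef.h>

struct mapping {
        struct mapping *next;
        int dead;
};

static void pmap_lock_stub(void) {}
static void pmap_unlock_stub(void) {}
static void page_lock_stub(struct mapping *m) { (void)m; }
static void page_unlock_stub(struct mapping *m) { (void)m; }

static void
remove_two_pass(struct mapping *head)
{
        struct mapping *m, *next, *tofree;

        tofree = NULL;

        /* Pass 1: detach from the pmap side only. */
        pmap_lock_stub();
        for (m = head; m != NULL; m = next) {
                next = m->next;
                m->dead = 1;            /* like marking the PVO dead */
                m->next = tofree;       /* queue for the second pass */
                tofree = m;
        }
        pmap_unlock_stub();

        /* Pass 2: per-page cleanup without holding the pmap lock. */
        for (m = tofree; m != NULL; m = next) {
                next = m->next;
                page_lock_stub(m);
                /* ... delink from the page's mapping list, then free ... */
                page_unlock_stub(m);
        }
}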
*/ if (pm->pm_stats.resident_count == 0) return; key.pvo_vaddr = sva; RB_INIT(&tofree); PMAP_LOCK(pm); for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); /* * For locking reasons, remove this from the page table and * pmap, but save delinking from the vm_page for a second * pass */ moea64_pvo_remove_from_pmap(mmu, pvo); RB_INSERT(pvo_tree, &tofree, pvo); } PMAP_UNLOCK(pm); RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) { PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN); moea64_pvo_remove_from_page(mmu, pvo); PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN); RB_REMOVE(pvo_tree, &tofree, pvo); free_pvo_entry(pvo); } } /* * Remove physical page from all pmaps in which it resides. moea64_pvo_remove() * will reflect changes in pte's back to the vm_page. */ void moea64_remove_all(mmu_t mmu, vm_page_t m) { struct pvo_entry *pvo, *next_pvo; struct pvo_head freequeue; int wasdead; pmap_t pmap; LIST_INIT(&freequeue); PV_PAGE_LOCK(m); LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) { pmap = pvo->pvo_pmap; PMAP_LOCK(pmap); wasdead = (pvo->pvo_vaddr & PVO_DEAD); if (!wasdead) moea64_pvo_remove_from_pmap(mmu, pvo); moea64_pvo_remove_from_page(mmu, pvo); if (!wasdead) LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink); PMAP_UNLOCK(pmap); } KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings")); KASSERT(!(m->aflags & PGA_WRITEABLE), ("Page still writable")); PV_PAGE_UNLOCK(m); /* Clean up UMA allocations */ LIST_FOREACH_SAFE(pvo, &freequeue, pvo_vlink, next_pvo) free_pvo_entry(pvo); } /* * Allocate a physical page of memory directly from the phys_avail map. * Can only be called from moea64_bootstrap before avail start and end are * calculated. */ vm_offset_t moea64_bootstrap_alloc(vm_size_t size, u_int align) { vm_offset_t s, e; int i, j; size = round_page(size); for (i = 0; phys_avail[i + 1] != 0; i += 2) { if (align != 0) s = (phys_avail[i] + align - 1) & ~(align - 1); else s = phys_avail[i]; e = s + size; if (s < phys_avail[i] || e > phys_avail[i + 1]) continue; if (s + size > platform_real_maxaddr()) continue; if (s == phys_avail[i]) { phys_avail[i] += size; } else if (e == phys_avail[i + 1]) { phys_avail[i + 1] -= size; } else { for (j = phys_avail_count * 2; j > i; j -= 2) { phys_avail[j] = phys_avail[j - 2]; phys_avail[j + 1] = phys_avail[j - 1]; } phys_avail[i + 3] = phys_avail[i + 1]; phys_avail[i + 1] = s; phys_avail[i + 2] = e; phys_avail_count++; } return (s); } panic("moea64_bootstrap_alloc: could not allocate memory"); } static int moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, struct pvo_head *pvo_head) { int first, err; PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); KASSERT(moea64_pvo_find_va(pvo->pvo_pmap, PVO_VADDR(pvo)) == NULL, ("Existing mapping for VA %#jx", (uintmax_t)PVO_VADDR(pvo))); moea64_pvo_enter_calls++; /* * Add to pmap list */ RB_INSERT(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo); /* * Remember if the list was empty and therefore will be the first * item. 
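/*
 * Illustrative sketch of how moea64_bootstrap_alloc() above carves a block
 * out of one phys_avail start/end pair: shave the front when the aligned
 * start coincides with the region start, shave the back when the block ends
 * exactly at the region end, and otherwise split.  This standalone helper
 * works on a single region and only truncates in the split case; the real
 * code shuffles the array to keep both halves.
 */
#include <stdint.h>

/*
 * Allocate 'size' bytes with power-of-two alignment 'align' from the region
 * [*start, *end); returns the chosen address, or 0 when it does not fit.
 */
static uint64_t
carve_from_region(uint64_t *start, uint64_t *end, uint64_t size,
    uint64_t align)
{
        uint64_t s, e;

        s = (align != 0) ? (*start + align - 1) & ~(align - 1) : *start;
        e = s + size;
        if (s < *start || e > *end)
                return (0);
        if (s == *start)                /* shave from the front */
                *start += size;
        else if (e == *end)             /* shave from the back */
                *end -= size;
        else                            /* middle: sketch simply truncates */
                *end = s;
        return (s);
}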
*/ if (pvo_head != NULL) { if (LIST_FIRST(pvo_head) == NULL) first = 1; LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); } if (pvo->pvo_vaddr & PVO_WIRED) pvo->pvo_pmap->pm_stats.wired_count++; pvo->pvo_pmap->pm_stats.resident_count++; /* * Insert it into the hardware page table */ err = MOEA64_PTE_INSERT(mmu, pvo); if (err != 0) { panic("moea64_pvo_enter: overflow"); } moea64_pvo_entries++; if (pvo->pvo_pmap == kernel_pmap) isync(); #ifdef __powerpc64__ /* * Make sure all our bootstrap mappings are in the SLB as soon * as virtual memory is switched on. */ if (!pmap_bootstrapped) moea64_bootstrap_slb_prefault(PVO_VADDR(pvo), pvo->pvo_vaddr & PVO_LARGE); #endif return (first ? ENOENT : 0); } static void moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo) { struct vm_page *pg; int32_t refchg; KASSERT(pvo->pvo_pmap != NULL, ("Trying to remove PVO with no pmap")); PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); KASSERT(!(pvo->pvo_vaddr & PVO_DEAD), ("Trying to remove dead PVO")); /* * If there is an active pte entry, we need to deactivate it */ refchg = MOEA64_PTE_UNSET(mmu, pvo); if (refchg < 0) { /* * If it was evicted from the page table, be pessimistic and * dirty the page. */ if (pvo->pvo_pte.prot & VM_PROT_WRITE) refchg = LPTE_CHG; else refchg = 0; } /* * Update our statistics. */ pvo->pvo_pmap->pm_stats.resident_count--; if (pvo->pvo_vaddr & PVO_WIRED) pvo->pvo_pmap->pm_stats.wired_count--; /* * Remove this PVO from the pmap list. */ RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo); /* * Mark this for the next sweep */ pvo->pvo_vaddr |= PVO_DEAD; /* Send RC bits to VM */ if ((pvo->pvo_vaddr & PVO_MANAGED) && (pvo->pvo_pte.prot & VM_PROT_WRITE)) { pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); if (pg != NULL) { refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs); if (refchg & LPTE_CHG) vm_page_dirty(pg); if (refchg & LPTE_REF) vm_page_aflag_set(pg, PGA_REFERENCED); } } } static void moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo) { struct vm_page *pg; KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page")); /* Use NULL pmaps as a sentinel for races in page deletion */ if (pvo->pvo_pmap == NULL) return; pvo->pvo_pmap = NULL; /* * Update vm about page writeability/executability if managed */ PV_LOCKASSERT(pvo->pvo_pte.pa & LPTE_RPGN); pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); if ((pvo->pvo_vaddr & PVO_MANAGED) && pg != NULL) { LIST_REMOVE(pvo, pvo_vlink); if (LIST_EMPTY(vm_page_to_pvoh(pg))) vm_page_aflag_clear(pg, PGA_WRITEABLE | PGA_EXECUTABLE); } moea64_pvo_entries--; moea64_pvo_remove_calls++; } static struct pvo_entry * moea64_pvo_find_va(pmap_t pm, vm_offset_t va) { struct pvo_entry key; PMAP_LOCK_ASSERT(pm, MA_OWNED); key.pvo_vaddr = va & ~ADDR_POFF; return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key)); } static boolean_t moea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit) { struct pvo_entry *pvo; int64_t ret; boolean_t rv; /* * See if this bit is stored in the page already. */ if (m->md.mdpg_attrs & ptebit) return (TRUE); /* * Examine each PTE. Sync so that any pending REF/CHG bits are * flushed to the PTEs. */ rv = FALSE; powerpc_sync(); PV_PAGE_LOCK(m); LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { ret = 0; /* * See if this pvo has a valid PTE. if so, fetch the * REF/CHG bits from the valid PTE. If the appropriate * ptebit is set, return success. 
*/ PMAP_LOCK(pvo->pvo_pmap); if (!(pvo->pvo_vaddr & PVO_DEAD)) ret = MOEA64_PTE_SYNCH(mmu, pvo); PMAP_UNLOCK(pvo->pvo_pmap); if (ret > 0) { atomic_set_32(&m->md.mdpg_attrs, ret & (LPTE_CHG | LPTE_REF)); if (ret & ptebit) { rv = TRUE; break; } } } PV_PAGE_UNLOCK(m); return (rv); } static u_int moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit) { u_int count; struct pvo_entry *pvo; int64_t ret; /* * Sync so that any pending REF/CHG bits are flushed to the PTEs (so * we can reset the right ones). */ powerpc_sync(); /* * For each pvo entry, clear the pte's ptebit. */ count = 0; PV_PAGE_LOCK(m); LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { ret = 0; PMAP_LOCK(pvo->pvo_pmap); if (!(pvo->pvo_vaddr & PVO_DEAD)) ret = MOEA64_PTE_CLEAR(mmu, pvo, ptebit); PMAP_UNLOCK(pvo->pvo_pmap); if (ret > 0 && (ret & ptebit)) count++; } atomic_clear_32(&m->md.mdpg_attrs, ptebit); PV_PAGE_UNLOCK(m); return (count); } boolean_t moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) { struct pvo_entry *pvo, key; vm_offset_t ppa; int error = 0; PMAP_LOCK(kernel_pmap); key.pvo_vaddr = ppa = pa & ~ADDR_POFF; for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key); ppa < pa + size; ppa += PAGE_SIZE, pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) { if (pvo == NULL || (pvo->pvo_pte.pa & LPTE_RPGN) != ppa) { error = EFAULT; break; } } PMAP_UNLOCK(kernel_pmap); return (error); } /* * Map a set of physical memory pages into the kernel virtual * address space. Return a pointer to where it is mapped. This * routine is intended to be used for mapping device memory, * NOT real memory. */ void * moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma) { vm_offset_t va, tmpva, ppa, offset; ppa = trunc_page(pa); offset = pa & PAGE_MASK; size = roundup2(offset + size, PAGE_SIZE); va = kva_alloc(size); if (!va) panic("moea64_mapdev: Couldn't alloc kernel virtual memory"); for (tmpva = va; size > 0;) { moea64_kenter_attr(mmu, tmpva, ppa, ma); size -= PAGE_SIZE; tmpva += PAGE_SIZE; ppa += PAGE_SIZE; } return ((void *)(va + offset)); } void * moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size) { return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT); } void moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) { vm_offset_t base, offset; base = trunc_page(va); offset = va & PAGE_MASK; size = roundup2(offset + size, PAGE_SIZE); kva_free(base, size); } void moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) { struct pvo_entry *pvo; vm_offset_t lim; vm_paddr_t pa; vm_size_t len; PMAP_LOCK(pm); while (sz > 0) { lim = round_page(va); len = MIN(lim - va, sz); pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) { pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va & ADDR_POFF); moea64_syncicache(mmu, pm, va, pa, len); } va += len; sz -= len; } PMAP_UNLOCK(pm); } void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va) { *va = (void *)pa; } extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1]; void moea64_scan_init(mmu_t mmu) { struct pvo_entry *pvo; vm_offset_t va; int i; if (!do_minidump) { /* Initialize phys. segments for dumpsys(). */ memset(&dump_map, 0, sizeof(dump_map)); mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); for (i = 0; i < pregions_sz; i++) { dump_map[i].pa_start = pregions[i].mr_start; dump_map[i].pa_size = pregions[i].mr_size; } return; } /* Virtual segments for minidumps: */ memset(&dump_map, 0, sizeof(dump_map)); /* 1st: kernel .data and .bss. 
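/*
 * Illustrative sketch of the alignment arithmetic in moea64_mapdev_attr()
 * above: both ends of the request are rounded to page boundaries, the pages
 * are entered one by one, and the caller gets back the original sub-page
 * offset.  reserve_kva() and enter_page() are placeholders for kva_alloc()
 * and moea64_kenter_attr().
 */
#include <stdint.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (PAGE_SIZE - 1)

static uint64_t
reserve_kva(uint64_t size)
{

        (void)size;
        return (0xc0000000UL);          /* made-up KVA for the sketch */
}

static void
enter_page(uint64_t va, uint64_t pa)
{

        (void)va;
        (void)pa;
}

static uint64_t
mapdev_sketch(uint64_t pa, uint64_t size)
{
        uint64_t ppa, offset, va, tmpva;

        ppa = pa & ~PAGE_MASK;                          /* trunc_page() */
        offset = pa & PAGE_MASK;
        size = (offset + size + PAGE_MASK) & ~PAGE_MASK; /* roundup2() */
        va = reserve_kva(size);
        for (tmpva = va; size > 0; size -= PAGE_SIZE) {
                enter_page(tmpva, ppa);
                tmpva += PAGE_SIZE;
                ppa += PAGE_SIZE;
        }
        return (va + offset);           /* caller sees the original offset */
}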
*/ dump_map[0].pa_start = trunc_page((uintptr_t)_etext); dump_map[0].pa_size = round_page((uintptr_t)_end) - dump_map[0].pa_start; /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr; dump_map[1].pa_size = round_page(msgbufp->msg_size); /* 3rd: kernel VM. */ va = dump_map[1].pa_start + dump_map[1].pa_size; /* Find start of next chunk (from va). */ while (va < virtual_end) { /* Don't dump the buffer cache. */ if (va >= kmi.buffer_sva && va < kmi.buffer_eva) { va = kmi.buffer_eva; continue; } pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF); if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD)) break; va += PAGE_SIZE; } if (va < virtual_end) { dump_map[2].pa_start = va; va += PAGE_SIZE; /* Find last page in chunk. */ while (va < virtual_end) { /* Don't run into the buffer cache. */ if (va == kmi.buffer_sva) break; pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF); if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD)) break; va += PAGE_SIZE; } dump_map[2].pa_size = va - dump_map[2].pa_start; } } Index: head/sys/powerpc/booke/machdep.c =================================================================== --- head/sys/powerpc/booke/machdep.c (revision 282263) +++ head/sys/powerpc/booke/machdep.c (nonexistent) @@ -1,719 +0,0 @@ -/*- - * Copyright (C) 2006-2012 Semihalf - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED - * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -/*- - * Copyright (C) 2001 Benno Rice - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $ - */ -/*- - * Copyright (C) 1995, 1996 Wolfgang Solfrank. - * Copyright (C) 1995, 1996 TooLs GmbH. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by TooLs GmbH. - * 4. The name of TooLs GmbH may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; - * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR - * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); - -#include "opt_compat.h" -#include "opt_ddb.h" -#include "opt_hwpmc_hooks.h" -#include "opt_kstack_pages.h" -#include "opt_platform.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include -#include - -#ifdef MPC85XX -#include -#endif - -#ifdef DDB -#include -#endif - -#ifdef DEBUG -#define debugf(fmt, args...) printf(fmt, ##args) -#else -#define debugf(fmt, args...) -#endif - -extern unsigned char kernel_text[]; -extern unsigned char _etext[]; -extern unsigned char _edata[]; -extern unsigned char __bss_start[]; -extern unsigned char __sbss_start[]; -extern unsigned char __sbss_end[]; -extern unsigned char _end[]; - -/* - * Bootinfo is passed to us by legacy loaders. Save the address of the - * structure to handle backward compatibility. 
- */ -uint32_t *bootinfo; - -struct kva_md_info kmi; -struct pcpu __pcpu[MAXCPU]; -struct trapframe frame0; -int cold = 1; -long realmem = 0; -long Maxmem = 0; -char machine[] = "powerpc"; -SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, ""); - -int cacheline_size = 32; - -SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size, - CTLFLAG_RD, &cacheline_size, 0, ""); - -int hw_direct_map = 0; - -static void cpu_booke_startup(void *); -SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_booke_startup, NULL); - -void print_kernel_section_addr(void); -void print_kenv(void); -u_int booke_init(uint32_t, uint32_t); -void ivor_setup(void); - -extern void *interrupt_vector_base; -extern void *int_critical_input; -extern void *int_machine_check; -extern void *int_data_storage; -extern void *int_instr_storage; -extern void *int_external_input; -extern void *int_alignment; -extern void *int_program; -extern void *int_syscall; -extern void *int_decrementer; -extern void *int_fixed_interval_timer; -extern void *int_watchdog; -extern void *int_data_tlb_error; -extern void *int_inst_tlb_error; -extern void *int_debug; -#ifdef HWPMC_HOOKS -extern void *int_performance_counter; -#endif - -#define SET_TRAP(ivor, handler) \ - KASSERT(((uintptr_t)(&handler) & ~0xffffUL) == \ - ((uintptr_t)(&interrupt_vector_base) & ~0xffffUL), \ - ("Handler " #handler " too far from interrupt vector base")); \ - mtspr(ivor, (uintptr_t)(&handler) & 0xffffUL); - -void -ivor_setup(void) -{ - - mtspr(SPR_IVPR, ((uintptr_t)&interrupt_vector_base) & 0xffff0000); - - SET_TRAP(SPR_IVOR0, int_critical_input); - SET_TRAP(SPR_IVOR1, int_machine_check); - SET_TRAP(SPR_IVOR2, int_data_storage); - SET_TRAP(SPR_IVOR3, int_instr_storage); - SET_TRAP(SPR_IVOR4, int_external_input); - SET_TRAP(SPR_IVOR5, int_alignment); - SET_TRAP(SPR_IVOR6, int_program); - SET_TRAP(SPR_IVOR8, int_syscall); - SET_TRAP(SPR_IVOR10, int_decrementer); - SET_TRAP(SPR_IVOR11, int_fixed_interval_timer); - SET_TRAP(SPR_IVOR12, int_watchdog); - SET_TRAP(SPR_IVOR13, int_data_tlb_error); - SET_TRAP(SPR_IVOR14, int_inst_tlb_error); - SET_TRAP(SPR_IVOR15, int_debug); -#ifdef HWPMC_HOOKS - SET_TRAP(SPR_IVOR35, int_performance_counter); -#endif -} - -static void -cpu_booke_startup(void *dummy) -{ - int indx; - unsigned long size; - - /* Initialise the decrementer-based clock. */ - decr_init(); - - /* Good {morning,afternoon,evening,night}. */ - cpu_setup(PCPU_GET(cpuid)); - - printf("real memory = %lu (%ld MB)\n", ptoa(physmem), - ptoa(physmem) / 1048576); - realmem = physmem; - - /* Display any holes after the first chunk of extended memory. */ - if (bootverbose) { - printf("Physical memory chunk(s):\n"); - for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { - size = phys_avail[indx + 1] - phys_avail[indx]; - - printf("0x%08x - 0x%08x, %lu bytes (%lu pages)\n", - phys_avail[indx], phys_avail[indx + 1] - 1, - size, size / PAGE_SIZE); - } - } - - vm_ksubmap_init(&kmi); - - printf("avail memory = %lu (%ld MB)\n", ptoa(vm_cnt.v_free_count), - ptoa(vm_cnt.v_free_count) / 1048576); - - /* Set up buffers, so they can be used to read disk labels. 
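/*
 * Illustrative sketch of the invariant behind the SET_TRAP() macro above:
 * every handler must live in the same 64 KB block as interrupt_vector_base,
 * because IVPR keeps only the upper bits and each IVORn keeps only the low
 * 16.  The struct and the addresses here are made up for the sketch.
 */
#include <stdint.h>
#include <stdio.h>

struct ivor_split {
        uint32_t ivpr;          /* 64 KB-aligned base, programmed once */
        uint32_t ivor;          /* low 16 bits, programmed per vector */
};

static int
split_handler(uint32_t base, uint32_t handler, struct ivor_split *out)
{

        if ((handler & ~0xffffUL) != (base & ~0xffffUL))
                return (-1);    /* handler too far from the vector base */
        out->ivpr = base & 0xffff0000UL;
        out->ivor = handler & 0xffffUL;
        return (0);
}

int
main(void)
{
        struct ivor_split s;

        if (split_handler(0x00100000, 0x00100300, &s) == 0)
                printf("IVPR %#x IVOR %#x\n", s.ivpr, s.ivor);
        return (0);
}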
*/ - bufinit(); - vm_pager_bufferinit(); -} - -static char * -kenv_next(char *cp) -{ - - if (cp != NULL) { - while (*cp != 0) - cp++; - cp++; - if (*cp == 0) - cp = NULL; - } - return (cp); -} - -void -print_kenv(void) -{ - int len; - char *cp; - - debugf("loader passed (static) kenv:\n"); - if (kern_envp == NULL) { - debugf(" no env, null ptr\n"); - return; - } - debugf(" kern_envp = 0x%08x\n", (u_int32_t)kern_envp); - - len = 0; - for (cp = kern_envp; cp != NULL; cp = kenv_next(cp)) - debugf(" %x %s\n", (u_int32_t)cp, cp); -} - -void -print_kernel_section_addr(void) -{ - - debugf("kernel image addresses:\n"); - debugf(" kernel_text = 0x%08x\n", (uint32_t)kernel_text); - debugf(" _etext (sdata) = 0x%08x\n", (uint32_t)_etext); - debugf(" _edata = 0x%08x\n", (uint32_t)_edata); - debugf(" __sbss_start = 0x%08x\n", (uint32_t)__sbss_start); - debugf(" __sbss_end = 0x%08x\n", (uint32_t)__sbss_end); - debugf(" __sbss_start = 0x%08x\n", (uint32_t)__bss_start); - debugf(" _end = 0x%08x\n", (uint32_t)_end); -} - -static int -booke_check_for_fdt(uint32_t arg1, vm_offset_t *dtbp) -{ - void *ptr; - - if (arg1 % 8 != 0) - return (-1); - - ptr = (void *)pmap_early_io_map(arg1, PAGE_SIZE); - if (fdt_check_header(ptr) != 0) - return (-1); - - *dtbp = (vm_offset_t)ptr; - - return (0); -} - -u_int -booke_init(uint32_t arg1, uint32_t arg2) -{ - struct pcpu *pc; - void *kmdp, *mdp; - vm_offset_t dtbp, end; -#ifdef DDB - vm_offset_t ksym_start; - vm_offset_t ksym_end; -#endif - - kmdp = NULL; - - end = (uintptr_t)_end; - dtbp = (vm_offset_t)NULL; - - /* Set up TLB initially */ - bootinfo = NULL; - tlb1_init(); - - /* - * Handle the various ways we can get loaded and started: - * - FreeBSD's loader passes the pointer to the metadata - * in arg1, with arg2 undefined. arg1 has a value that's - * relative to the kernel's link address (i.e. larger - * than 0xc0000000). - * - Juniper's loader passes the metadata pointer in arg2 - * and sets arg1 to zero. This is to signal that the - * loader maps the kernel and starts it at its link - * address (unlike the FreeBSD loader). - * - U-Boot passes the standard argc and argv parameters - * in arg1 and arg2 (resp). arg1 is between 1 and some - * relatively small number, such as 64K. arg2 is the - * physical address of the argv vector. - * - ePAPR loaders pass an FDT blob in r3 (arg1) and the magic hex - * string 0x45504150 ('ePAP') in r6 (which has been lost by now). - * r4 (arg2) is supposed to be set to zero, but is not always. - */ - - if (arg1 == 0) /* Juniper loader */ - mdp = (void *)arg2; - else if (booke_check_for_fdt(arg1, &dtbp) == 0) { /* ePAPR */ - end = roundup(end, 8); - memmove((void *)end, (void *)dtbp, fdt_totalsize((void *)dtbp)); - dtbp = end; - end += fdt_totalsize((void *)dtbp); - mdp = NULL; - } else if (arg1 > (uintptr_t)kernel_text) /* FreeBSD loader */ - mdp = (void *)arg1; - else /* U-Boot */ - mdp = NULL; - - /* - * Parse metadata and fetch parameters. 
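/*
 * Illustrative aside, not part of this change: print_kenv() walks the static
 * kernel environment with kenv_next(), where entries are NUL-separated
 * strings and an empty string terminates the list.  A standalone sketch of
 * that walk (the sample environment contents are made up):
 */
#include <stdio.h>

static const char *
env_next(const char *cp)
{
    if (cp != NULL) {
        while (*cp != '\0')    /* skip over the current string */
            cp++;
        cp++;                  /* step past its terminating NUL */
        if (*cp == '\0')       /* an empty string ends the list */
            cp = NULL;
    }
    return (cp);
}

int
main(void)
{
    static const char env[] = "bootverbose=1\0console=comconsole\0\0";
    const char *cp;

    for (cp = env; cp != NULL; cp = env_next(cp))
        printf("%s\n", cp);
    return (0);
}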
- */ - if (mdp != NULL) { - preload_metadata = mdp; - kmdp = preload_search_by_type("elf kernel"); - if (kmdp != NULL) { - boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); - kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *); - dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); - end = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t); - - bootinfo = (uint32_t *)preload_search_info(kmdp, - MODINFO_METADATA | MODINFOMD_BOOTINFO); - -#ifdef DDB - ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t); - ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t); - db_fetch_ksymtab(ksym_start, ksym_end); -#endif - } - } else { - bzero(__sbss_start, __sbss_end - __sbss_start); - bzero(__bss_start, _end - __bss_start); - } - -#if defined(FDT_DTB_STATIC) - /* - * In case the device tree blob was not retrieved (from metadata) try - * to use the statically embedded one. - */ - if (dtbp == (vm_offset_t)NULL) - dtbp = (vm_offset_t)&fdt_static_dtb; -#endif - - if (OF_install(OFW_FDT, 0) == FALSE) - while (1); - - if (OF_init((void *)dtbp) != 0) - while (1); - - OF_interpret("perform-fixup", 0); - - /* Reset TLB1 to get rid of temporary mappings */ - tlb1_init(); - - /* Reset Time Base */ - mttb(0); - - /* Init params/tunables that can be overridden by the loader. */ - init_param1(); - - /* Start initializing proc0 and thread0. */ - proc_linkup0(&proc0, &thread0); - thread0.td_frame = &frame0; - - /* Set up per-cpu data and store the pointer in SPR general 0. */ - pc = &__pcpu[0]; - pcpu_init(pc, 0, sizeof(struct pcpu)); - pc->pc_curthread = &thread0; -#ifdef __powerpc64__ - __asm __volatile("mr 13,%0" :: "r"(pc->pc_curthread)); -#else - __asm __volatile("mr 2,%0" :: "r"(pc->pc_curthread)); -#endif - __asm __volatile("mtsprg 0, %0" :: "r"(pc)); - - /* Initialize system mutexes. */ - mutex_init(); - - /* Initialize the console before printing anything. */ - cninit(); - - /* Print out some debug info... */ - debugf("%s: console initialized\n", __func__); - debugf(" arg3 mdp = 0x%08x\n", (u_int32_t)mdp); - debugf(" end = 0x%08x\n", (u_int32_t)end); - debugf(" boothowto = 0x%08x\n", boothowto); -#ifdef MPC85XX - debugf(" kernel ccsrbar = 0x%08x\n", CCSRBAR_VA); -#endif - debugf(" MSR = 0x%08x\n", mfmsr()); -#if defined(BOOKE_E500) - debugf(" HID0 = 0x%08x\n", mfspr(SPR_HID0)); - debugf(" HID1 = 0x%08x\n", mfspr(SPR_HID1)); - debugf(" BUCSR = 0x%08x\n", mfspr(SPR_BUCSR)); -#endif - - debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp); - - print_kernel_section_addr(); - print_kenv(); -#if defined(BOOKE_E500) - //tlb1_print_entries(); - //tlb1_print_tlbentries(); -#endif - - kdb_init(); - -#ifdef KDB - if (boothowto & RB_KDB) - kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger"); -#endif - - /* Initialise platform module */ - platform_probe_and_attach(); - - /* Initialise virtual memory. */ - pmap_mmu_install(MMU_TYPE_BOOKE, 0); - pmap_bootstrap((uintptr_t)kernel_text, end); - debugf("MSR = 0x%08x\n", mfmsr()); -#if defined(BOOKE_E500) - //tlb1_print_entries(); - //tlb1_print_tlbentries(); -#endif - - /* Initialize params/tunables that are derived from memsize. */ - init_param2(physmem); - - /* Finish setting up thread0. */ - thread0.td_pcb = (struct pcb *) - ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE - - sizeof(struct pcb)) & ~15); - bzero((void *)thread0.td_pcb, sizeof(struct pcb)); - pc->pc_curpcb = thread0.td_pcb; - - /* Initialise the message buffer. */ - msgbufinit(msgbufp, msgbufsize); - - /* Enable Machine Check interrupt. 
*/ - mtmsr(mfmsr() | PSL_ME); - isync(); - - /* Enable L1 caches */ - booke_enable_l1_cache(); - - debugf("%s: SP = 0x%08x\n", __func__, - ((uintptr_t)thread0.td_pcb - 16) & ~15); - - return (((uintptr_t)thread0.td_pcb - 16) & ~15); -} - -#define RES_GRANULE 32 -extern uint32_t tlb0_miss_locks[]; - -/* Initialise a struct pcpu. */ -void -cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz) -{ - - pcpu->pc_tid_next = TID_MIN; - -#ifdef SMP - uint32_t *ptr; - int words_per_gran = RES_GRANULE / sizeof(uint32_t); - - ptr = &tlb0_miss_locks[cpuid * words_per_gran]; - pcpu->pc_booke_tlb_lock = ptr; - *ptr = TLB_UNLOCKED; - *(ptr + 1) = 0; /* recurse counter */ -#endif -} - -/* - * Flush the D-cache for non-DMA I/O so that the I-cache can - * be made coherent later. - */ -void -cpu_flush_dcache(void *ptr, size_t len) -{ - register_t addr, off; - - /* - * Align the address to a cacheline and adjust the length - * accordingly. Then round the length to a multiple of the - * cacheline for easy looping. - */ - addr = (uintptr_t)ptr; - off = addr & (cacheline_size - 1); - addr -= off; - len = (len + off + cacheline_size - 1) & ~(cacheline_size - 1); - - while (len > 0) { - __asm __volatile ("dcbf 0,%0" :: "r"(addr)); - __asm __volatile ("sync"); - addr += cacheline_size; - len -= cacheline_size; - } -} - -void -spinlock_enter(void) -{ - struct thread *td; - register_t msr; - - td = curthread; - if (td->td_md.md_spinlock_count == 0) { - msr = intr_disable(); - td->td_md.md_spinlock_count = 1; - td->td_md.md_saved_msr = msr; - } else - td->td_md.md_spinlock_count++; - critical_enter(); -} - -void -spinlock_exit(void) -{ - struct thread *td; - register_t msr; - - td = curthread; - critical_exit(); - msr = td->td_md.md_saved_msr; - td->td_md.md_spinlock_count--; - if (td->td_md.md_spinlock_count == 0) - intr_restore(msr); -} - -/* Shutdown the CPU as much as possible. 
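/*
 * Illustrative aside, not part of this change: cpu_flush_dcache() rounds the
 * requested address down to a cache line boundary and rounds the length up
 * to whole lines before issuing dcbf per line.  The helper below reproduces
 * only that rounding arithmetic so it can be checked in isolation; the
 * 32-byte line size matches the default above but is an assumption here.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EX_CACHELINE    32

static void
dcache_range(uintptr_t addr, size_t len, uintptr_t *start, size_t *nbytes)
{
    uintptr_t off;

    off = addr & (EX_CACHELINE - 1);
    *start = addr - off;                       /* line-aligned start */
    *nbytes = (len + off + EX_CACHELINE - 1) &
        ~((size_t)EX_CACHELINE - 1);           /* whole lines */
}

int
main(void)
{
    uintptr_t start;
    size_t nbytes;

    dcache_range(0x1003, 10, &start, &nbytes);
    printf("flush 0x%lx, %zu bytes\n", (unsigned long)start, nbytes);
    return (0);
}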
*/ -void -cpu_halt(void) -{ - - mtmsr(mfmsr() & ~(PSL_CE | PSL_EE | PSL_ME | PSL_DE)); - while (1) - ; -} - -int -ptrace_set_pc(struct thread *td, unsigned long addr) -{ - struct trapframe *tf; - - tf = td->td_frame; - tf->srr0 = (register_t)addr; - - return (0); -} - -int -ptrace_single_step(struct thread *td) -{ - struct trapframe *tf; - - tf = td->td_frame; - tf->srr1 |= PSL_DE; - tf->cpu.booke.dbcr0 |= (DBCR0_IDM | DBCR0_IC); - return (0); -} - -int -ptrace_clear_single_step(struct thread *td) -{ - struct trapframe *tf; - - tf = td->td_frame; - tf->srr1 &= ~PSL_DE; - tf->cpu.booke.dbcr0 &= ~(DBCR0_IDM | DBCR0_IC); - return (0); -} - -void -kdb_cpu_clear_singlestep(void) -{ - register_t r; - - r = mfspr(SPR_DBCR0); - mtspr(SPR_DBCR0, r & ~DBCR0_IC); - kdb_frame->srr1 &= ~PSL_DE; -} - -void -kdb_cpu_set_singlestep(void) -{ - register_t r; - - r = mfspr(SPR_DBCR0); - mtspr(SPR_DBCR0, r | DBCR0_IC | DBCR0_IDM); - kdb_frame->srr1 |= PSL_DE; -} - -void -bzero(void *buf, size_t len) -{ - caddr_t p; - - p = buf; - - while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) { - *p++ = 0; - len--; - } - - while (len >= sizeof(u_long) * 8) { - *(u_long*) p = 0; - *((u_long*) p + 1) = 0; - *((u_long*) p + 2) = 0; - *((u_long*) p + 3) = 0; - len -= sizeof(u_long) * 8; - *((u_long*) p + 4) = 0; - *((u_long*) p + 5) = 0; - *((u_long*) p + 6) = 0; - *((u_long*) p + 7) = 0; - p += sizeof(u_long) * 8; - } - - while (len >= sizeof(u_long)) { - *(u_long*) p = 0; - len -= sizeof(u_long); - p += sizeof(u_long); - } - - while (len) { - *p++ = 0; - len--; - } -} - Property changes on: head/sys/powerpc/booke/machdep.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/powerpc/booke/booke_machdep.c =================================================================== --- head/sys/powerpc/booke/booke_machdep.c (nonexistent) +++ head/sys/powerpc/booke/booke_machdep.c (revision 282264) @@ -0,0 +1,388 @@ +/*- + * Copyright (C) 2006-2012 Semihalf + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN + * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED + * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/*- + * Copyright (C) 2001 Benno Rice + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $ + */ +/*- + * Copyright (C) 1995, 1996 Wolfgang Solfrank. + * Copyright (C) 1995, 1996 TooLs GmbH. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by TooLs GmbH. + * 4. The name of TooLs GmbH may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_compat.h" +#include "opt_ddb.h" +#include "opt_hwpmc_hooks.h" +#include "opt_kstack_pages.h" +#include "opt_platform.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#ifdef MPC85XX +#include +#endif + +#ifdef DDB +#include +#endif + +#ifdef DEBUG +#define debugf(fmt, args...) printf(fmt, ##args) +#else +#define debugf(fmt, args...) +#endif + +extern unsigned char kernel_text[]; +extern unsigned char _etext[]; +extern unsigned char _edata[]; +extern unsigned char __bss_start[]; +extern unsigned char __sbss_start[]; +extern unsigned char __sbss_end[]; +extern unsigned char _end[]; +extern vm_offset_t __endkernel; + +/* + * Bootinfo is passed to us by legacy loaders. Save the address of the + * structure to handle backward compatibility. + */ +uint32_t *bootinfo; + +void print_kernel_section_addr(void); +void print_kenv(void); +u_int booke_init(uint32_t, uint32_t); +void ivor_setup(void); + +extern void *interrupt_vector_base; +extern void *int_critical_input; +extern void *int_machine_check; +extern void *int_data_storage; +extern void *int_instr_storage; +extern void *int_external_input; +extern void *int_alignment; +extern void *int_program; +extern void *int_syscall; +extern void *int_decrementer; +extern void *int_fixed_interval_timer; +extern void *int_watchdog; +extern void *int_data_tlb_error; +extern void *int_inst_tlb_error; +extern void *int_debug; +#ifdef HWPMC_HOOKS +extern void *int_performance_counter; +#endif + +#define SET_TRAP(ivor, handler) \ + KASSERT(((uintptr_t)(&handler) & ~0xffffUL) == \ + ((uintptr_t)(&interrupt_vector_base) & ~0xffffUL), \ + ("Handler " #handler " too far from interrupt vector base")); \ + mtspr(ivor, (uintptr_t)(&handler) & 0xffffUL); + +uintptr_t powerpc_init(vm_offset_t fdt, vm_offset_t, vm_offset_t, void *mdp); +void booke_cpu_init(void); + +void +booke_cpu_init(void) +{ + + pmap_mmu_install(MMU_TYPE_BOOKE, BUS_PROBE_GENERIC); +} + +void +ivor_setup(void) +{ + + mtspr(SPR_IVPR, ((uintptr_t)&interrupt_vector_base) & 0xffff0000); + + SET_TRAP(SPR_IVOR0, int_critical_input); + SET_TRAP(SPR_IVOR1, int_machine_check); + SET_TRAP(SPR_IVOR2, int_data_storage); + SET_TRAP(SPR_IVOR3, int_instr_storage); + SET_TRAP(SPR_IVOR4, int_external_input); + SET_TRAP(SPR_IVOR5, int_alignment); + SET_TRAP(SPR_IVOR6, int_program); + SET_TRAP(SPR_IVOR8, int_syscall); + SET_TRAP(SPR_IVOR10, int_decrementer); + SET_TRAP(SPR_IVOR11, int_fixed_interval_timer); + SET_TRAP(SPR_IVOR12, int_watchdog); + SET_TRAP(SPR_IVOR13, int_data_tlb_error); + SET_TRAP(SPR_IVOR14, int_inst_tlb_error); + SET_TRAP(SPR_IVOR15, int_debug); +#ifdef HWPMC_HOOKS + SET_TRAP(SPR_IVOR35, int_performance_counter); +#endif +} + +static int +booke_check_for_fdt(uint32_t arg1, vm_offset_t *dtbp) +{ + void *ptr; + + if (arg1 % 8 != 0) + return (-1); + + ptr = (void *)pmap_early_io_map(arg1, PAGE_SIZE); + if (fdt_check_header(ptr) != 0) + return (-1); + + *dtbp = (vm_offset_t)ptr; + + return (0); +} + +uintptr_t +booke_init(uint32_t arg1, uint32_t arg2) +{ + uintptr_t ret; + void *mdp; + 
vm_offset_t dtbp, end; + + end = (uintptr_t)_end; + dtbp = (vm_offset_t)NULL; + + /* Set up TLB initially */ + bootinfo = NULL; + bzero(__sbss_start, __sbss_end - __sbss_start); + bzero(__bss_start, _end - __bss_start); + tlb1_init(); + + /* + * Handle the various ways we can get loaded and started: + * - FreeBSD's loader passes the pointer to the metadata + * in arg1, with arg2 undefined. arg1 has a value that's + * relative to the kernel's link address (i.e. larger + * than 0xc0000000). + * - Juniper's loader passes the metadata pointer in arg2 + * and sets arg1 to zero. This is to signal that the + * loader maps the kernel and starts it at its link + * address (unlike the FreeBSD loader). + * - U-Boot passes the standard argc and argv parameters + * in arg1 and arg2 (resp). arg1 is between 1 and some + * relatively small number, such as 64K. arg2 is the + * physical address of the argv vector. + * - ePAPR loaders pass an FDT blob in r3 (arg1) and the magic hex + * string 0x45504150 ('ePAP') in r6 (which has been lost by now). + * r4 (arg2) is supposed to be set to zero, but is not always. + */ + + if (arg1 == 0) /* Juniper loader */ + mdp = (void *)arg2; + else if (booke_check_for_fdt(arg1, &dtbp) == 0) { /* ePAPR */ + end = roundup(end, 8); + memmove((void *)end, (void *)dtbp, fdt_totalsize((void *)dtbp)); + dtbp = end; + end += fdt_totalsize((void *)dtbp); + __endkernel = end; + mdp = NULL; + } else if (arg1 > (uintptr_t)kernel_text) /* FreeBSD loader */ + mdp = (void *)arg1; + else /* U-Boot */ + mdp = NULL; + + /* Reset TLB1 to get rid of temporary mappings */ + tlb1_init(); + + ret = powerpc_init(dtbp, 0, 0, mdp); + + /* Enable L1 caches */ + booke_enable_l1_cache(); + + return (ret); +} + +#define RES_GRANULE 32 +extern uint32_t tlb0_miss_locks[]; + +/* Initialise a struct pcpu. */ +void +cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t sz) +{ + + pcpu->pc_tid_next = TID_MIN; + +#ifdef SMP + uint32_t *ptr; + int words_per_gran = RES_GRANULE / sizeof(uint32_t); + + ptr = &tlb0_miss_locks[cpuid * words_per_gran]; + pcpu->pc_booke_tlb_lock = ptr; + *ptr = TLB_UNLOCKED; + *(ptr + 1) = 0; /* recurse counter */ +#endif +} + +/* Shutdown the CPU as much as possible. 
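/*
 * Illustrative aside, not part of this change: the arg1/arg2 loader
 * conventions described in the comment above can be summarized as a small
 * classifier.  The enum, the EX_KERNBASE value and the helper name are
 * hypothetical; booke_init() makes the same decision inline using
 * booke_check_for_fdt() and kernel_text.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_KERNBASE    0xc0000000UL    /* assumed kernel link address */

enum ex_loader { EX_JUNIPER, EX_EPAPR, EX_FREEBSD, EX_UBOOT };

static enum ex_loader
classify_loader(uint32_t arg1, int arg1_is_fdt)
{
    if (arg1 == 0)
        return (EX_JUNIPER);    /* metadata pointer is in arg2 */
    if (arg1_is_fdt)
        return (EX_EPAPR);      /* arg1 points at a flattened device tree */
    if (arg1 > EX_KERNBASE)
        return (EX_FREEBSD);    /* arg1 is the metadata pointer */
    return (EX_UBOOT);          /* arg1 is argc, arg2 is argv */
}

int
main(void)
{
    printf("%d %d\n", classify_loader(0, 0), classify_loader(64, 0));
    return (0);
}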
*/ +void +cpu_halt(void) +{ + + mtmsr(mfmsr() & ~(PSL_CE | PSL_EE | PSL_ME | PSL_DE)); + while (1) + ; +} + +int +ptrace_single_step(struct thread *td) +{ + struct trapframe *tf; + + tf = td->td_frame; + tf->srr1 |= PSL_DE; + tf->cpu.booke.dbcr0 |= (DBCR0_IDM | DBCR0_IC); + return (0); +} + +int +ptrace_clear_single_step(struct thread *td) +{ + struct trapframe *tf; + + tf = td->td_frame; + tf->srr1 &= ~PSL_DE; + tf->cpu.booke.dbcr0 &= ~(DBCR0_IDM | DBCR0_IC); + return (0); +} + +void +kdb_cpu_clear_singlestep(void) +{ + register_t r; + + r = mfspr(SPR_DBCR0); + mtspr(SPR_DBCR0, r & ~DBCR0_IC); + kdb_frame->srr1 &= ~PSL_DE; +} + +void +kdb_cpu_set_singlestep(void) +{ + register_t r; + + r = mfspr(SPR_DBCR0); + mtspr(SPR_DBCR0, r | DBCR0_IC | DBCR0_IDM); + kdb_frame->srr1 |= PSL_DE; +} + Property changes on: head/sys/powerpc/booke/booke_machdep.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/powerpc/booke/pmap.c =================================================================== --- head/sys/powerpc/booke/pmap.c (revision 282263) +++ head/sys/powerpc/booke/pmap.c (revision 282264) @@ -1,3327 +1,3322 @@ /*- * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski * Copyright (C) 2006 Semihalf, Marian Balakowicz * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Some hw specific parts of this pmap were derived or influenced * by NetBSD's ibm4xx pmap module. More generic code is shared with * a few other pmap modules from the FreeBSD tree. */ /* * VM layout notes: * * Kernel and user threads run within one common virtual address space * defined by AS=0. * * Virtual address space layout: * ----------------------------- * 0x0000_0000 - 0xafff_ffff : user process * 0xb000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.) * 0xc000_0000 - 0xc0ff_ffff : kernel reserved * 0xc000_0000 - data_end : kernel code+data, env, metadata etc. 
* 0xc100_0000 - 0xfeef_ffff : KVA * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs * 0xc200_4000 - 0xc200_8fff : guard page + kstack0 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space * 0xfef0_0000 - 0xffff_ffff : I/O devices region */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mmu_if.h" #ifdef DEBUG #define debugf(fmt, args...) printf(fmt, ##args) #else #define debugf(fmt, args...) #endif #define TODO panic("%s: not implemented", __func__); extern unsigned char _etext[]; extern unsigned char _end[]; extern uint32_t *bootinfo; #ifdef SMP extern uint32_t bp_ntlb1s; #endif vm_paddr_t kernload; vm_offset_t kernstart; vm_size_t kernsize; /* Message buffer and tables. */ static vm_offset_t data_start; static vm_size_t data_end; /* Phys/avail memory regions. */ static struct mem_region *availmem_regions; static int availmem_regions_sz; static struct mem_region *physmem_regions; static int physmem_regions_sz; /* Reserved KVA space and mutex for mmu_booke_zero_page. */ static vm_offset_t zero_page_va; static struct mtx zero_page_mutex; static struct mtx tlbivax_mutex; /* * Reserved KVA space for mmu_booke_zero_page_idle. This is used * by idle thred only, no lock required. */ static vm_offset_t zero_page_idle_va; /* Reserved KVA space and mutex for mmu_booke_copy_page. */ static vm_offset_t copy_page_src_va; static vm_offset_t copy_page_dst_va; static struct mtx copy_page_mutex; /**************************************************************************/ /* PMAP */ /**************************************************************************/ static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int flags, int8_t psind); unsigned int kptbl_min; /* Index of the first kernel ptbl. */ unsigned int kernel_ptbls; /* Number of KVA ptbls. */ /* * If user pmap is processed with mmu_booke_remove and the resident count * drops to 0, there are no more pages to remove, so we need not continue. */ #define PMAP_REMOVE_DONE(pmap) \ ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0) extern void tid_flush(tlbtid_t tid, int tlb0_ways, int tlb0_entries_per_way); extern int elf32_nxstack; /**************************************************************************/ /* TLB and TID handling */ /**************************************************************************/ /* Translation ID busy table */ static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1]; /* * TLB0 capabilities (entry, way numbers etc.). These can vary between e500 * core revisions and should be read from h/w registers during early config. 
*/ uint32_t tlb0_entries; uint32_t tlb0_ways; uint32_t tlb0_entries_per_way; #define TLB0_ENTRIES (tlb0_entries) #define TLB0_WAYS (tlb0_ways) #define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way) #define TLB1_ENTRIES 16 /* In-ram copy of the TLB1 */ static tlb_entry_t tlb1[TLB1_ENTRIES]; /* Next free entry in the TLB1 */ static unsigned int tlb1_idx; static vm_offset_t tlb1_map_base = VM_MAX_KERNEL_ADDRESS; static tlbtid_t tid_alloc(struct pmap *); static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t); static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t); static void tlb1_write_entry(unsigned int); static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *); static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t); static vm_size_t tsize2size(unsigned int); static unsigned int size2tsize(vm_size_t); static unsigned int ilog2(unsigned int); static void set_mas4_defaults(void); static inline void tlb0_flush_entry(vm_offset_t); static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int); /**************************************************************************/ /* Page table management */ /**************************************************************************/ static struct rwlock_padalign pvh_global_lock; /* Data for the pv entry allocation mechanism */ static uma_zone_t pvzone; static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; #define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */ #ifndef PMAP_SHPGPERPROC #define PMAP_SHPGPERPROC 200 #endif static void ptbl_init(void); static struct ptbl_buf *ptbl_buf_alloc(void); static void ptbl_buf_free(struct ptbl_buf *); static void ptbl_free_pmap_ptbl(pmap_t, pte_t *); static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t); static void ptbl_free(mmu_t, pmap_t, unsigned int); static void ptbl_hold(mmu_t, pmap_t, unsigned int); static int ptbl_unhold(mmu_t, pmap_t, unsigned int); static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t); static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t); static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t); static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t); static pv_entry_t pv_alloc(void); static void pv_free(pv_entry_t); static void pv_insert(pmap_t, vm_offset_t, vm_page_t); static void pv_remove(pmap_t, vm_offset_t, vm_page_t); /* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */ #define PTBL_BUFS (128 * 16) struct ptbl_buf { TAILQ_ENTRY(ptbl_buf) link; /* list link */ vm_offset_t kva; /* va of mapping */ }; /* ptbl free list and a lock used for access synchronization. */ static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist; static struct mtx ptbl_buf_freelist_lock; /* Base address of kva space allocated fot ptbl bufs. */ static vm_offset_t ptbl_buf_pool_vabase; /* Pointer to ptbl_buf structures. 
*/ static struct ptbl_buf *ptbl_bufs; #ifdef SMP void pmap_bootstrap_ap(volatile uint32_t *); #endif /* * Kernel MMU interface */ static void mmu_booke_clear_modify(mmu_t, vm_page_t); static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t); static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t); static void mmu_booke_copy_pages(mmu_t, vm_page_t *, vm_offset_t, vm_page_t *, vm_offset_t, int); static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int flags, int8_t psind); static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, vm_prot_t); static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t); static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); static void mmu_booke_init(mmu_t); static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t); static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t); static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t); static int mmu_booke_ts_referenced(mmu_t, vm_page_t); static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int); static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t, vm_paddr_t *); static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, vm_object_t, vm_pindex_t, vm_size_t); static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t); static void mmu_booke_page_init(mmu_t, vm_page_t); static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t); static void mmu_booke_pinit(mmu_t, pmap_t); static void mmu_booke_pinit0(mmu_t, pmap_t); static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int); static void mmu_booke_qremove(mmu_t, vm_offset_t, int); static void mmu_booke_release(mmu_t, pmap_t); static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); static void mmu_booke_remove_all(mmu_t, vm_page_t); static void mmu_booke_remove_write(mmu_t, vm_page_t); static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t); static void mmu_booke_zero_page(mmu_t, vm_page_t); static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int); static void mmu_booke_zero_page_idle(mmu_t, vm_page_t); static void mmu_booke_activate(mmu_t, struct thread *); static void mmu_booke_deactivate(mmu_t, struct thread *); static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t); static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t); static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t); static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t); static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t); static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t); static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t); static void mmu_booke_kremove(mmu_t, vm_offset_t); static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t); static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t, void **); static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t, void *); static void mmu_booke_scan_init(mmu_t); static mmu_method_t mmu_booke_methods[] = { /* pmap dispatcher interface */ MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify), MMUMETHOD(mmu_copy, mmu_booke_copy), MMUMETHOD(mmu_copy_page, mmu_booke_copy_page), 
MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages), MMUMETHOD(mmu_enter, mmu_booke_enter), MMUMETHOD(mmu_enter_object, mmu_booke_enter_object), MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick), MMUMETHOD(mmu_extract, mmu_booke_extract), MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold), MMUMETHOD(mmu_init, mmu_booke_init), MMUMETHOD(mmu_is_modified, mmu_booke_is_modified), MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable), MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced), MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced), MMUMETHOD(mmu_map, mmu_booke_map), MMUMETHOD(mmu_mincore, mmu_booke_mincore), MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt), MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick), MMUMETHOD(mmu_page_init, mmu_booke_page_init), MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings), MMUMETHOD(mmu_pinit, mmu_booke_pinit), MMUMETHOD(mmu_pinit0, mmu_booke_pinit0), MMUMETHOD(mmu_protect, mmu_booke_protect), MMUMETHOD(mmu_qenter, mmu_booke_qenter), MMUMETHOD(mmu_qremove, mmu_booke_qremove), MMUMETHOD(mmu_release, mmu_booke_release), MMUMETHOD(mmu_remove, mmu_booke_remove), MMUMETHOD(mmu_remove_all, mmu_booke_remove_all), MMUMETHOD(mmu_remove_write, mmu_booke_remove_write), MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache), MMUMETHOD(mmu_unwire, mmu_booke_unwire), MMUMETHOD(mmu_zero_page, mmu_booke_zero_page), MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area), MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle), MMUMETHOD(mmu_activate, mmu_booke_activate), MMUMETHOD(mmu_deactivate, mmu_booke_deactivate), /* Internal interfaces */ MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap), MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped), MMUMETHOD(mmu_mapdev, mmu_booke_mapdev), MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr), MMUMETHOD(mmu_kenter, mmu_booke_kenter), MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr), MMUMETHOD(mmu_kextract, mmu_booke_kextract), /* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */ MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev), /* dumpsys() support */ MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map), MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap), MMUMETHOD(mmu_scan_init, mmu_booke_scan_init), { 0, 0 } }; MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0); static __inline uint32_t tlb_calc_wimg(vm_offset_t pa, vm_memattr_t ma) { uint32_t attrib; int i; if (ma != VM_MEMATTR_DEFAULT) { switch (ma) { case VM_MEMATTR_UNCACHEABLE: return (PTE_I | PTE_G); case VM_MEMATTR_WRITE_COMBINING: case VM_MEMATTR_WRITE_BACK: case VM_MEMATTR_PREFETCHABLE: return (PTE_I); case VM_MEMATTR_WRITE_THROUGH: return (PTE_W | PTE_M); } } /* * Assume the page is cache inhibited and access is guarded unless * it's in our available memory array. 
*/ attrib = _TLB_ENTRY_IO; for (i = 0; i < physmem_regions_sz; i++) { if ((pa >= physmem_regions[i].mr_start) && (pa < (physmem_regions[i].mr_start + physmem_regions[i].mr_size))) { attrib = _TLB_ENTRY_MEM; break; } } return (attrib); } static inline void tlb_miss_lock(void) { #ifdef SMP struct pcpu *pc; if (!smp_started) return; STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { if (pc != pcpup) { CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, " "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock); KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)), ("tlb_miss_lock: tried to lock self")); tlb_lock(pc->pc_booke_tlb_lock); CTR1(KTR_PMAP, "%s: locked", __func__); } } #endif } static inline void tlb_miss_unlock(void) { #ifdef SMP struct pcpu *pc; if (!smp_started) return; STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { if (pc != pcpup) { CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d", __func__, pc->pc_cpuid); tlb_unlock(pc->pc_booke_tlb_lock); CTR1(KTR_PMAP, "%s: unlocked", __func__); } } #endif } /* Return number of entries in TLB0. */ static __inline void tlb0_get_tlbconf(void) { uint32_t tlb0_cfg; tlb0_cfg = mfspr(SPR_TLB0CFG); tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK; tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT; tlb0_entries_per_way = tlb0_entries / tlb0_ways; } /* Initialize pool of kva ptbl buffers. */ static void ptbl_init(void) { int i; CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__, (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS); CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)", __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE); mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF); TAILQ_INIT(&ptbl_buf_freelist); for (i = 0; i < PTBL_BUFS; i++) { ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE; TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link); } } /* Get a ptbl_buf from the freelist. */ static struct ptbl_buf * ptbl_buf_alloc(void) { struct ptbl_buf *buf; mtx_lock(&ptbl_buf_freelist_lock); buf = TAILQ_FIRST(&ptbl_buf_freelist); if (buf != NULL) TAILQ_REMOVE(&ptbl_buf_freelist, buf, link); mtx_unlock(&ptbl_buf_freelist_lock); CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); return (buf); } /* Return ptbl buff to free pool. */ static void ptbl_buf_free(struct ptbl_buf *buf) { CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); mtx_lock(&ptbl_buf_freelist_lock); TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link); mtx_unlock(&ptbl_buf_freelist_lock); } /* * Search the list of allocated ptbl bufs and find on list of allocated ptbls */ static void ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl) { struct ptbl_buf *pbuf; CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); PMAP_LOCK_ASSERT(pmap, MA_OWNED); TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) if (pbuf->kva == (vm_offset_t)ptbl) { /* Remove from pmap ptbl buf list. */ TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link); /* Free corresponding ptbl buf. */ ptbl_buf_free(pbuf); break; } } /* Allocate page table. 
*/ static pte_t * ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep) { vm_page_t mtbl[PTBL_PAGES]; vm_page_t m; struct ptbl_buf *pbuf; unsigned int pidx; pte_t *ptbl; int i, j; CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, (pmap == kernel_pmap), pdir_idx); KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), ("ptbl_alloc: invalid pdir_idx")); KASSERT((pmap->pm_pdir[pdir_idx] == NULL), ("pte_alloc: valid ptbl entry exists!")); pbuf = ptbl_buf_alloc(); if (pbuf == NULL) panic("pte_alloc: couldn't alloc kernel virtual memory"); ptbl = (pte_t *)pbuf->kva; CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl); /* Allocate ptbl pages, this will sleep! */ for (i = 0; i < PTBL_PAGES; i++) { pidx = (PTBL_PAGES * pdir_idx) + i; while ((m = vm_page_alloc(NULL, pidx, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { PMAP_UNLOCK(pmap); rw_wunlock(&pvh_global_lock); if (nosleep) { ptbl_free_pmap_ptbl(pmap, ptbl); for (j = 0; j < i; j++) vm_page_free(mtbl[j]); atomic_subtract_int(&vm_cnt.v_wire_count, i); return (NULL); } VM_WAIT; rw_wlock(&pvh_global_lock); PMAP_LOCK(pmap); } mtbl[i] = m; } /* Map allocated pages into kernel_pmap. */ mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES); /* Zero whole ptbl. */ bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE); /* Add pbuf to the pmap ptbl bufs list. */ TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link); return (ptbl); } /* Free ptbl pages and invalidate pdir entry. */ static void ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) { pte_t *ptbl; vm_paddr_t pa; vm_offset_t va; vm_page_t m; int i; CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, (pmap == kernel_pmap), pdir_idx); KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), ("ptbl_free: invalid pdir_idx")); ptbl = pmap->pm_pdir[pdir_idx]; CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); KASSERT((ptbl != NULL), ("ptbl_free: null ptbl")); /* * Invalidate the pdir entry as soon as possible, so that other CPUs * don't attempt to look up the page tables we are releasing. */ mtx_lock_spin(&tlbivax_mutex); tlb_miss_lock(); pmap->pm_pdir[pdir_idx] = NULL; tlb_miss_unlock(); mtx_unlock_spin(&tlbivax_mutex); for (i = 0; i < PTBL_PAGES; i++) { va = ((vm_offset_t)ptbl + (i * PAGE_SIZE)); pa = pte_vatopa(mmu, kernel_pmap, va); m = PHYS_TO_VM_PAGE(pa); vm_page_free_zero(m); atomic_subtract_int(&vm_cnt.v_wire_count, 1); mmu_booke_kremove(mmu, va); } ptbl_free_pmap_ptbl(pmap, ptbl); } /* * Decrement ptbl pages hold count and attempt to free ptbl pages. * Called when removing pte entry from ptbl. * * Return 1 if ptbl pages were freed. */ static int ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) { pte_t *ptbl; vm_paddr_t pa; vm_page_t m; int i; CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, (pmap == kernel_pmap), pdir_idx); KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), ("ptbl_unhold: invalid pdir_idx")); KASSERT((pmap != kernel_pmap), ("ptbl_unhold: unholding kernel ptbl!")); ptbl = pmap->pm_pdir[pdir_idx]; //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), ("ptbl_unhold: non kva ptbl")); /* decrement hold count */ for (i = 0; i < PTBL_PAGES; i++) { pa = pte_vatopa(mmu, kernel_pmap, (vm_offset_t)ptbl + (i * PAGE_SIZE)); m = PHYS_TO_VM_PAGE(pa); m->wire_count--; } /* * Free ptbl pages if there are no pte etries in this ptbl. * wire_count has the same value for all ptbl pages, so check the last * page. 
*/ if (m->wire_count == 0) { ptbl_free(mmu, pmap, pdir_idx); //debugf("ptbl_unhold: e (freed ptbl)\n"); return (1); } return (0); } /* * Increment hold count for ptbl pages. This routine is used when a new pte * entry is being inserted into the ptbl. */ static void ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) { vm_paddr_t pa; pte_t *ptbl; vm_page_t m; int i; CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap, pdir_idx); KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), ("ptbl_hold: invalid pdir_idx")); KASSERT((pmap != kernel_pmap), ("ptbl_hold: holding kernel ptbl!")); ptbl = pmap->pm_pdir[pdir_idx]; KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); for (i = 0; i < PTBL_PAGES; i++) { pa = pte_vatopa(mmu, kernel_pmap, (vm_offset_t)ptbl + (i * PAGE_SIZE)); m = PHYS_TO_VM_PAGE(pa); m->wire_count++; } } /* Allocate pv_entry structure. */ pv_entry_t pv_alloc(void) { pv_entry_t pv; pv_entry_count++; if (pv_entry_count > pv_entry_high_water) pagedaemon_wakeup(); pv = uma_zalloc(pvzone, M_NOWAIT); return (pv); } /* Free pv_entry structure. */ static __inline void pv_free(pv_entry_t pve) { pv_entry_count--; uma_zfree(pvzone, pve); } /* Allocate and initialize pv_entry structure. */ static void pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) { pv_entry_t pve; //int su = (pmap == kernel_pmap); //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su, // (u_int32_t)pmap, va, (u_int32_t)m); pve = pv_alloc(); if (pve == NULL) panic("pv_insert: no pv entries!"); pve->pv_pmap = pmap; pve->pv_va = va; /* add to pv_list */ PMAP_LOCK_ASSERT(pmap, MA_OWNED); rw_assert(&pvh_global_lock, RA_WLOCKED); TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link); //debugf("pv_insert: e\n"); } /* Destroy pv entry. */ static void pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m) { pv_entry_t pve; //int su = (pmap == kernel_pmap); //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va); PMAP_LOCK_ASSERT(pmap, MA_OWNED); rw_assert(&pvh_global_lock, RA_WLOCKED); /* find pv entry */ TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) { if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { /* remove from pv_list */ TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); if (TAILQ_EMPTY(&m->md.pv_list)) vm_page_aflag_clear(m, PGA_WRITEABLE); /* free pv entry struct */ pv_free(pve); break; } } //debugf("pv_remove: e\n"); } /* * Clean pte entry, try to free page table page if requested. * * Return 1 if ptbl pages were freed, otherwise return 0. */ static int pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags) { unsigned int pdir_idx = PDIR_IDX(va); unsigned int ptbl_idx = PTBL_IDX(va); vm_page_t m; pte_t *ptbl; pte_t *pte; //int su = (pmap == kernel_pmap); //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n", // su, (u_int32_t)pmap, va, flags); ptbl = pmap->pm_pdir[pdir_idx]; KASSERT(ptbl, ("pte_remove: null ptbl")); pte = &ptbl[ptbl_idx]; if (pte == NULL || !PTE_ISVALID(pte)) return (0); if (PTE_ISWIRED(pte)) pmap->pm_stats.wired_count--; /* Handle managed entry. */ if (PTE_ISMANAGED(pte)) { /* Get vm_page_t for mapped pte. 
*/ m = PHYS_TO_VM_PAGE(PTE_PA(pte)); if (PTE_ISMODIFIED(pte)) vm_page_dirty(m); if (PTE_ISREFERENCED(pte)) vm_page_aflag_set(m, PGA_REFERENCED); pv_remove(pmap, va, m); } mtx_lock_spin(&tlbivax_mutex); tlb_miss_lock(); tlb0_flush_entry(va); pte->flags = 0; pte->rpn = 0; tlb_miss_unlock(); mtx_unlock_spin(&tlbivax_mutex); pmap->pm_stats.resident_count--; if (flags & PTBL_UNHOLD) { //debugf("pte_remove: e (unhold)\n"); return (ptbl_unhold(mmu, pmap, pdir_idx)); } //debugf("pte_remove: e\n"); return (0); } /* * Insert PTE for a given page and virtual address. */ static int pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags, boolean_t nosleep) { unsigned int pdir_idx = PDIR_IDX(va); unsigned int ptbl_idx = PTBL_IDX(va); pte_t *ptbl, *pte; CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__, pmap == kernel_pmap, pmap, va); /* Get the page table pointer. */ ptbl = pmap->pm_pdir[pdir_idx]; if (ptbl == NULL) { /* Allocate page table pages. */ ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep); if (ptbl == NULL) { KASSERT(nosleep, ("nosleep and NULL ptbl")); return (ENOMEM); } } else { /* * Check if there is valid mapping for requested * va, if there is, remove it. */ pte = &pmap->pm_pdir[pdir_idx][ptbl_idx]; if (PTE_ISVALID(pte)) { pte_remove(mmu, pmap, va, PTBL_HOLD); } else { /* * pte is not used, increment hold count * for ptbl pages. */ if (pmap != kernel_pmap) ptbl_hold(mmu, pmap, pdir_idx); } } /* * Insert pv_entry into pv_list for mapped page if part of managed * memory. */ if ((m->oflags & VPO_UNMANAGED) == 0) { flags |= PTE_MANAGED; /* Create and insert pv entry. */ pv_insert(pmap, va, m); } pmap->pm_stats.resident_count++; mtx_lock_spin(&tlbivax_mutex); tlb_miss_lock(); tlb0_flush_entry(va); if (pmap->pm_pdir[pdir_idx] == NULL) { /* * If we just allocated a new page table, hook it in * the pdir. */ pmap->pm_pdir[pdir_idx] = ptbl; } pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]); pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK; pte->flags |= (PTE_VALID | flags); tlb_miss_unlock(); mtx_unlock_spin(&tlbivax_mutex); return (0); } /* Return the pa for the given pmap/va. */ static vm_paddr_t pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va) { vm_paddr_t pa = 0; pte_t *pte; pte = pte_find(mmu, pmap, va); if ((pte != NULL) && PTE_ISVALID(pte)) pa = (PTE_PA(pte) | (va & PTE_PA_MASK)); return (pa); } /* Get a pointer to a PTE in a page table. */ static pte_t * pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va) { unsigned int pdir_idx = PDIR_IDX(va); unsigned int ptbl_idx = PTBL_IDX(va); KASSERT((pmap != NULL), ("pte_find: invalid pmap")); if (pmap->pm_pdir[pdir_idx]) return (&(pmap->pm_pdir[pdir_idx][ptbl_idx])); return (NULL); } /**************************************************************************/ /* PMAP related */ /**************************************************************************/ /* * This is called during booke_init, before the system is really initialized. */ static void mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend) { vm_offset_t phys_kernelend; struct mem_region *mp, *mp1; int cnt, i, j; u_int s, e, sz; u_int phys_avail_count; vm_size_t physsz, hwphyssz, kstack0_sz; vm_offset_t kernel_pdir, kstack0, va; vm_paddr_t kstack0_phys; void *dpcpu; pte_t *pte; debugf("mmu_booke_bootstrap: entered\n"); /* Set interesting system properties */ hw_direct_map = 0; elf32_nxstack = 1; /* Initialize invalidation mutex */ mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN); /* Read TLB0 size and associativity. 
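/*
 * Illustrative aside, not part of this change: pte_find() above indexes a
 * two-level structure, first by PDIR_IDX(va) and then by PTBL_IDX(va).  The
 * sketch below shows that kind of decomposition with an assumed
 * 10-bit/10-bit/12-bit split; the real widths come from machine/pte.h and
 * may differ.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT    12
#define EX_PTBL_BITS     10
#define EX_PDIR_IDX(va)  ((uint32_t)(va) >> (EX_PAGE_SHIFT + EX_PTBL_BITS))
#define EX_PTBL_IDX(va)  \
    (((uint32_t)(va) >> EX_PAGE_SHIFT) & ((1u << EX_PTBL_BITS) - 1))

int
main(void)
{
    uint32_t va = 0xc0123456;

    printf("pdir %u ptbl %u offset 0x%x\n", EX_PDIR_IDX(va),
        EX_PTBL_IDX(va), va & ((1u << EX_PAGE_SHIFT) - 1));
    return (0);
}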
*/ tlb0_get_tlbconf(); /* * Align kernel start and end address (kernel image). * Note that kernel end does not necessarily relate to kernsize. * kernsize is the size of the kernel that is actually mapped. - * Also note that "start - 1" is deliberate. With SMP, the - * entry point is exactly a page from the actual load address. - * As such, trunc_page() has no effect and we're off by a page. - * Since we always have the ELF header between the load address - * and the entry point, we can safely subtract 1 to compensate. */ - kernstart = trunc_page(start - 1); + kernstart = trunc_page(start); data_start = round_page(kernelend); data_end = data_start; /* * Addresses of preloaded modules (like file systems) use * physical addresses. Make sure we relocate those into * virtual addresses. */ preload_addr_relocate = kernstart - kernload; /* Allocate the dynamic per-cpu area. */ dpcpu = (void *)data_end; data_end += DPCPU_SIZE; /* Allocate space for the message buffer. */ msgbufp = (struct msgbuf *)data_end; data_end += msgbufsize; debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp, data_end); data_end = round_page(data_end); /* Allocate space for ptbl_bufs. */ ptbl_bufs = (struct ptbl_buf *)data_end; data_end += sizeof(struct ptbl_buf) * PTBL_BUFS; debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs, data_end); data_end = round_page(data_end); /* Allocate PTE tables for kernel KVA. */ kernel_pdir = data_end; kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + PDIR_SIZE - 1) / PDIR_SIZE; data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE; debugf(" kernel ptbls: %d\n", kernel_ptbls); debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end); debugf(" data_end: 0x%08x\n", data_end); if (data_end - kernstart > kernsize) { kernsize += tlb1_mapin_region(kernstart + kernsize, kernload + kernsize, (data_end - kernstart) - kernsize); } data_end = kernstart + kernsize; debugf(" updated data_end: 0x%08x\n", data_end); /* * Clear the structures - note we can only do it safely after the * possible additional TLB1 translations are in place (above) so that * all range up to the currently calculated 'data_end' is covered. */ dpcpu_init(dpcpu, 0); memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); /*******************************************************/ /* Set the start and end of kva. */ /*******************************************************/ virtual_avail = round_page(data_end); virtual_end = VM_MAX_KERNEL_ADDRESS; /* Allocate KVA space for page zero/copy operations. */ zero_page_va = virtual_avail; virtual_avail += PAGE_SIZE; zero_page_idle_va = virtual_avail; virtual_avail += PAGE_SIZE; copy_page_src_va = virtual_avail; virtual_avail += PAGE_SIZE; copy_page_dst_va = virtual_avail; virtual_avail += PAGE_SIZE; debugf("zero_page_va = 0x%08x\n", zero_page_va); debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va); debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va); debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va); /* Initialize page zero/copy mutexes. */ mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF); mtx_init(©_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF); /* Allocate KVA space for ptbl bufs. 
*/ ptbl_buf_pool_vabase = virtual_avail; virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE; debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n", ptbl_buf_pool_vabase, virtual_avail); /* Calculate corresponding physical addresses for the kernel region. */ phys_kernelend = kernload + kernsize; debugf("kernel image and allocated data:\n"); debugf(" kernload = 0x%08x\n", kernload); debugf(" kernstart = 0x%08x\n", kernstart); debugf(" kernsize = 0x%08x\n", kernsize); if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz) panic("mmu_booke_bootstrap: phys_avail too small"); /* * Remove kernel physical address range from avail regions list. Page * align all regions. Non-page aligned memory isn't very interesting * to us. Also, sort the entries for ascending addresses. */ /* Retrieve phys/avail mem regions */ mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions, &availmem_regions_sz); sz = 0; cnt = availmem_regions_sz; debugf("processing avail regions:\n"); for (mp = availmem_regions; mp->mr_size; mp++) { s = mp->mr_start; e = mp->mr_start + mp->mr_size; debugf(" %08x-%08x -> ", s, e); /* Check whether this region holds all of the kernel. */ if (s < kernload && e > phys_kernelend) { availmem_regions[cnt].mr_start = phys_kernelend; availmem_regions[cnt++].mr_size = e - phys_kernelend; e = kernload; } /* Look whether this regions starts within the kernel. */ if (s >= kernload && s < phys_kernelend) { if (e <= phys_kernelend) goto empty; s = phys_kernelend; } /* Now look whether this region ends within the kernel. */ if (e > kernload && e <= phys_kernelend) { if (s >= kernload) goto empty; e = kernload; } /* Now page align the start and size of the region. */ s = round_page(s); e = trunc_page(e); if (e < s) e = s; sz = e - s; debugf("%08x-%08x = %x\n", s, e, sz); /* Check whether some memory is left here. */ if (sz == 0) { empty: memmove(mp, mp + 1, (cnt - (mp - availmem_regions)) * sizeof(*mp)); cnt--; mp--; continue; } /* Do an insertion sort. 
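/*
 * Illustrative aside, not part of this change: the avail-region loop above
 * removes the physical range occupied by the kernel from each region,
 * possibly splitting a region in two.  A simplified standalone version of
 * that carve-out (types and names are made up for the example):
 */
#include <stdint.h>
#include <stdio.h>

struct ex_region {
    uint32_t start;    /* inclusive */
    uint32_t end;      /* exclusive */
};

/* Remove "kern" from "avail"; returns how many pieces survive (0..2). */
static int
ex_carve(struct ex_region avail, struct ex_region kern,
    struct ex_region out[2])
{
    int n = 0;

    if (avail.start < kern.start)
        out[n++] = (struct ex_region){ avail.start,
            avail.end < kern.start ? avail.end : kern.start };
    if (avail.end > kern.end)
        out[n++] = (struct ex_region){
            avail.start > kern.end ? avail.start : kern.end,
            avail.end };
    return (n);
}

int
main(void)
{
    struct ex_region out[2];
    int i, n;

    n = ex_carve((struct ex_region){ 0x00000000, 0x20000000 },
        (struct ex_region){ 0x00100000, 0x00800000 }, out);
    for (i = 0; i < n; i++)
        printf("0x%08x - 0x%08x\n", out[i].start, out[i].end);
    return (0);
}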
*/ for (mp1 = availmem_regions; mp1 < mp; mp1++) if (s < mp1->mr_start) break; if (mp1 < mp) { memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); mp1->mr_start = s; mp1->mr_size = sz; } else { mp->mr_start = s; mp->mr_size = sz; } } availmem_regions_sz = cnt; /*******************************************************/ /* Steal physical memory for kernel stack from the end */ /* of the first avail region */ /*******************************************************/ kstack0_sz = KSTACK_PAGES * PAGE_SIZE; kstack0_phys = availmem_regions[0].mr_start + availmem_regions[0].mr_size; kstack0_phys -= kstack0_sz; availmem_regions[0].mr_size -= kstack0_sz; /*******************************************************/ /* Fill in phys_avail table, based on availmem_regions */ /*******************************************************/ phys_avail_count = 0; physsz = 0; hwphyssz = 0; TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); debugf("fill in phys_avail:\n"); for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", availmem_regions[i].mr_start, availmem_regions[i].mr_start + availmem_regions[i].mr_size, availmem_regions[i].mr_size); if (hwphyssz != 0 && (physsz + availmem_regions[i].mr_size) >= hwphyssz) { debugf(" hw.physmem adjust\n"); if (physsz < hwphyssz) { phys_avail[j] = availmem_regions[i].mr_start; phys_avail[j + 1] = availmem_regions[i].mr_start + hwphyssz - physsz; physsz = hwphyssz; phys_avail_count++; } break; } phys_avail[j] = availmem_regions[i].mr_start; phys_avail[j + 1] = availmem_regions[i].mr_start + availmem_regions[i].mr_size; phys_avail_count++; physsz += availmem_regions[i].mr_size; } physmem = btoc(physsz); /* Calculate the last available physical address. */ for (i = 0; phys_avail[i + 2] != 0; i += 2) ; Maxmem = powerpc_btop(phys_avail[i + 1]); debugf("Maxmem = 0x%08lx\n", Maxmem); debugf("phys_avail_count = %d\n", phys_avail_count); debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, physmem); /*******************************************************/ /* Initialize (statically allocated) kernel pmap. */ /*******************************************************/ PMAP_LOCK_INIT(kernel_pmap); kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap); debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls); debugf("kernel pdir range: 0x%08x - 0x%08x\n", kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1); /* Initialize kernel pdir */ for (i = 0; i < kernel_ptbls; i++) kernel_pmap->pm_pdir[kptbl_min + i] = (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES)); for (i = 0; i < MAXCPU; i++) { kernel_pmap->pm_tid[i] = TID_KERNEL; /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */ tidbusy[i][0] = kernel_pmap; } /* * Fill in PTEs covering kernel code and data. They are not required * for address translation, as this area is covered by static TLB1 * entries, but for pte_vatopa() to work correctly with kernel area * addresses. */ for (va = kernstart; va < data_end; va += PAGE_SIZE) { pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]); pte->rpn = kernload + (va - kernstart); pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID; } /* Mark kernel_pmap active on all CPUs */ CPU_FILL(&kernel_pmap->pm_active); /* * Initialize the global pv list lock. 
*/ rw_init(&pvh_global_lock, "pmap pv global"); /*******************************************************/ /* Final setup */ /*******************************************************/ /* Enter kstack0 into kernel map, provide guard page */ kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; thread0.td_kstack = kstack0; thread0.td_kstack_pages = KSTACK_PAGES; debugf("kstack_sz = 0x%08x\n", kstack0_sz); debugf("kstack0_phys at 0x%08x - 0x%08x\n", kstack0_phys, kstack0_phys + kstack0_sz); debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz); virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz; for (i = 0; i < KSTACK_PAGES; i++) { mmu_booke_kenter(mmu, kstack0, kstack0_phys); kstack0 += PAGE_SIZE; kstack0_phys += PAGE_SIZE; } pmap_bootstrapped = 1; debugf("virtual_avail = %08x\n", virtual_avail); debugf("virtual_end = %08x\n", virtual_end); debugf("mmu_booke_bootstrap: exit\n"); } #ifdef SMP void pmap_bootstrap_ap(volatile uint32_t *trcp __unused) { int i; /* * Finish TLB1 configuration: the BSP already set up its TLB1 and we * have the snapshot of its contents in the s/w tlb1[] table, so use * these values directly to (re)program AP's TLB1 hardware. */ for (i = bp_ntlb1s; i < tlb1_idx; i++) { /* Skip invalid entries */ if (!(tlb1[i].mas1 & MAS1_VALID)) continue; tlb1_write_entry(i); } set_mas4_defaults(); } #endif /* * Get the physical page address for the given pmap/virtual address. */ static vm_paddr_t mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) { vm_paddr_t pa; PMAP_LOCK(pmap); pa = pte_vatopa(mmu, pmap, va); PMAP_UNLOCK(pmap); return (pa); } /* * Extract the physical page address associated with the given * kernel virtual address. */ static vm_paddr_t mmu_booke_kextract(mmu_t mmu, vm_offset_t va) { int i; /* Check TLB1 mappings */ for (i = 0; i < tlb1_idx; i++) { if (!(tlb1[i].mas1 & MAS1_VALID)) continue; if (va >= tlb1[i].virt && va < tlb1[i].virt + tlb1[i].size) return (tlb1[i].phys + (va - tlb1[i].virt)); } return (pte_vatopa(mmu, kernel_pmap, va)); } /* * Initialize the pmap module. * Called by vm_init, to initialize any structures that the pmap * system needs to map virtual memory. */ static void mmu_booke_init(mmu_t mmu) { int shpgperproc = PMAP_SHPGPERPROC; /* * Initialize the address space (zone) for the pv entries. Set a * high water mark so that the system can recover from excessive * numbers of pv entries. */ pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count; TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); pv_entry_high_water = 9 * (pv_entry_max / 10); uma_zone_reserve_kva(pvzone, pv_entry_max); /* Pre-fill pvzone with initial number of pv entries. */ uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); /* Initialize ptbl allocation. */ ptbl_init(); } /* * Map a list of wired pages into kernel virtual address space. This is * intended for temporary mappings which do not need page modification or * references recorded. Existing mappings in the region are overwritten. */ static void mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) { vm_offset_t va; va = sva; while (count-- > 0) { mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); va += PAGE_SIZE; m++; } } /* * Remove page mappings from kernel virtual address space. Intended for * temporary mappings entered by mmu_booke_qenter. 
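The TLB1 lookup that mmu_booke_kextract() tries before falling back to pte_vatopa() is just an interval test over the software copy of TLB1. A hedged, self-contained sketch, where tlb1_entry_t is a stand-in for the kernel's tlb1[] slot layout:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint32_t virt, phys, size;
        int valid;
    } tlb1_entry_t;                     /* stand-in for a tlb1[] slot */

    /* Translate a VA covered by a static TLB1 entry; returns 0 if not covered. */
    static uint32_t
    tlb1_lookup(const tlb1_entry_t *t, int n, uint32_t va)
    {
        for (int i = 0; i < n; i++) {
            if (!t[i].valid)
                continue;
            if (va >= t[i].virt && va < t[i].virt + t[i].size)
                return (t[i].phys + (va - t[i].virt));
        }
        return (0);     /* caller would fall back to the page tables */
    }

    int
    main(void)
    {
        tlb1_entry_t t[1] = { { 0xc0000000, 0x00000000, 0x01000000, 1 } };

        printf("0x%08x\n", tlb1_lookup(t, 1, 0xc0123000));  /* 0x00123000 */
        return (0);
    }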
*/ static void mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) { vm_offset_t va; va = sva; while (count-- > 0) { mmu_booke_kremove(mmu, va); va += PAGE_SIZE; } } /* * Map a wired page into kernel virtual address space. */ static void mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) { mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); } static void mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) { unsigned int pdir_idx = PDIR_IDX(va); unsigned int ptbl_idx = PTBL_IDX(va); uint32_t flags; pte_t *pte; KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID; flags |= tlb_calc_wimg(pa, ma); pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); mtx_lock_spin(&tlbivax_mutex); tlb_miss_lock(); if (PTE_ISVALID(pte)) { CTR1(KTR_PMAP, "%s: replacing entry!", __func__); /* Flush entry from TLB0 */ tlb0_flush_entry(va); } pte->rpn = pa & ~PTE_PA_MASK; pte->flags = flags; //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); /* Flush the real memory from the instruction cache. */ if ((flags & (PTE_I | PTE_G)) == 0) { __syncicache((void *)va, PAGE_SIZE); } tlb_miss_unlock(); mtx_unlock_spin(&tlbivax_mutex); } /* * Remove a page from kernel page table. */ static void mmu_booke_kremove(mmu_t mmu, vm_offset_t va) { unsigned int pdir_idx = PDIR_IDX(va); unsigned int ptbl_idx = PTBL_IDX(va); pte_t *pte; // CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va)); KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kremove: invalid va")); pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); if (!PTE_ISVALID(pte)) { CTR1(KTR_PMAP, "%s: invalid pte", __func__); return; } mtx_lock_spin(&tlbivax_mutex); tlb_miss_lock(); /* Invalidate entry in TLB0, update PTE. */ tlb0_flush_entry(va); pte->flags = 0; pte->rpn = 0; tlb_miss_unlock(); mtx_unlock_spin(&tlbivax_mutex); } /* * Initialize pmap associated with process 0. */ static void mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) { PMAP_LOCK_INIT(pmap); mmu_booke_pinit(mmu, pmap); PCPU_SET(curpmap, pmap); } /* * Initialize a preallocated and zeroed pmap structure, * such as one in a vmspace structure. */ static void mmu_booke_pinit(mmu_t mmu, pmap_t pmap) { int i; CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap, curthread->td_proc->p_pid, curthread->td_proc->p_comm); KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap")); for (i = 0; i < MAXCPU; i++) pmap->pm_tid[i] = TID_NONE; CPU_ZERO(&kernel_pmap->pm_active); bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES); TAILQ_INIT(&pmap->pm_ptbl_list); } /* * Release any resources held by the given physical map. * Called when a pmap initialized by mmu_booke_pinit is being released. * Should only be called if the map contains no valid mappings. */ static void mmu_booke_release(mmu_t mmu, pmap_t pmap) { KASSERT(pmap->pm_stats.resident_count == 0, ("pmap_release: pmap resident count %ld != 0", pmap->pm_stats.resident_count)); } /* * Insert the given physical page at the specified virtual address in the * target physical map with the protection requested. If specified the page * will be wired down. 
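mmu_booke_kenter_attr() and mmu_booke_kremove() locate the PTE by splitting the VA with PDIR_IDX()/PTBL_IDX(). The sketch below assumes the common 10/10/12 split (1024-entry page directory, 1024-entry page tables, 4 KB pages); the real shifts come from the BookE pte.h headers, so treat these constants as illustrative.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed 32-bit split: 10-bit pdir index, 10-bit ptbl index, 12-bit offset. */
    #define PAGE_SHIFT   12
    #define PTBL_BITS    10
    #define PDIR_IDX(va) ((uint32_t)(va) >> (PAGE_SHIFT + PTBL_BITS))
    #define PTBL_IDX(va) (((uint32_t)(va) >> PAGE_SHIFT) & ((1u << PTBL_BITS) - 1))

    int
    main(void)
    {
        uint32_t va = 0xc0123456;

        printf("va=0x%08x pdir_idx=%u ptbl_idx=%u offset=0x%03x\n",
            va, PDIR_IDX(va), PTBL_IDX(va), va & ((1u << PAGE_SHIFT) - 1));
        return (0);
    }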
*/ static int mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, u_int flags, int8_t psind) { int error; rw_wlock(&pvh_global_lock); PMAP_LOCK(pmap); error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind); rw_wunlock(&pvh_global_lock); PMAP_UNLOCK(pmap); return (error); } static int mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, u_int pmap_flags, int8_t psind __unused) { pte_t *pte; vm_paddr_t pa; uint32_t flags; int error, su, sync; pa = VM_PAGE_TO_PHYS(m); su = (pmap == kernel_pmap); sync = 0; //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " // "pa=0x%08x prot=0x%08x flags=%#x)\n", // (u_int32_t)pmap, su, pmap->pm_tid, // (u_int32_t)m, va, pa, prot, flags); if (su) { KASSERT(((va >= virtual_avail) && (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_enter_locked: kernel pmap, non kernel va")); } else { KASSERT((va <= VM_MAXUSER_ADDRESS), ("mmu_booke_enter_locked: user pmap, non user va")); } if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) VM_OBJECT_ASSERT_LOCKED(m->object); PMAP_LOCK_ASSERT(pmap, MA_OWNED); /* * If there is an existing mapping, and the physical address has not * changed, must be protection or wiring change. */ if (((pte = pte_find(mmu, pmap, va)) != NULL) && (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { /* * Before actually updating pte->flags we calculate and * prepare its new value in a helper var. */ flags = pte->flags; flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); /* Wiring change, just update stats. */ if ((pmap_flags & PMAP_ENTER_WIRED) != 0) { if (!PTE_ISWIRED(pte)) { flags |= PTE_WIRED; pmap->pm_stats.wired_count++; } } else { if (PTE_ISWIRED(pte)) { flags &= ~PTE_WIRED; pmap->pm_stats.wired_count--; } } if (prot & VM_PROT_WRITE) { /* Add write permissions. */ flags |= PTE_SW; if (!su) flags |= PTE_UW; if ((flags & PTE_MANAGED) != 0) vm_page_aflag_set(m, PGA_WRITEABLE); } else { /* Handle modified pages, sense modify status. */ /* * The PTE_MODIFIED flag could be set by underlying * TLB misses since we last read it (above), possibly * other CPUs could update it so we check in the PTE * directly rather than rely on that saved local flags * copy. */ if (PTE_ISMODIFIED(pte)) vm_page_dirty(m); } if (prot & VM_PROT_EXECUTE) { flags |= PTE_SX; if (!su) flags |= PTE_UX; /* * Check existing flags for execute permissions: if we * are turning execute permissions on, icache should * be flushed. */ if ((pte->flags & (PTE_UX | PTE_SX)) == 0) sync++; } flags &= ~PTE_REFERENCED; /* * The new flags value is all calculated -- only now actually * update the PTE. */ mtx_lock_spin(&tlbivax_mutex); tlb_miss_lock(); tlb0_flush_entry(va); pte->flags = flags; tlb_miss_unlock(); mtx_unlock_spin(&tlbivax_mutex); } else { /* * If there is an existing mapping, but it's for a different * physical address, pte_enter() will delete the old mapping. */ //if ((pte != NULL) && PTE_ISVALID(pte)) // debugf("mmu_booke_enter_locked: replace\n"); //else // debugf("mmu_booke_enter_locked: new\n"); /* Now set up the flags and install the new mapping. */ flags = (PTE_SR | PTE_VALID); flags |= PTE_M; if (!su) flags |= PTE_UR; if (prot & VM_PROT_WRITE) { flags |= PTE_SW; if (!su) flags |= PTE_UW; if ((m->oflags & VPO_UNMANAGED) == 0) vm_page_aflag_set(m, PGA_WRITEABLE); } if (prot & VM_PROT_EXECUTE) { flags |= PTE_SX; if (!su) flags |= PTE_UX; } /* If its wired update stats. 
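The flag construction for a new mapping in mmu_booke_enter_locked() follows a fixed pattern: supervisor read is always granted, the matching user bits are added only for user pmaps, and the write/execute bits mirror the requested protection. A condensed sketch with made-up bit values (the kernel's PTE_* constants differ):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit assignments; the real PTE_* values are different. */
    #define PTE_VALID  0x001
    #define PTE_M      0x002    /* memory coherence */
    #define PTE_SR     0x004
    #define PTE_SW     0x008
    #define PTE_SX     0x010
    #define PTE_UR     0x020
    #define PTE_UW     0x040
    #define PTE_UX     0x080

    #define PROT_WRITE 0x1
    #define PROT_EXEC  0x2

    static uint32_t
    pte_flags_for(int prot, int su /* supervisor (kernel) mapping? */)
    {
        uint32_t flags = PTE_SR | PTE_VALID | PTE_M;

        if (!su)
            flags |= PTE_UR;
        if (prot & PROT_WRITE) {
            flags |= PTE_SW;
            if (!su)
                flags |= PTE_UW;
        }
        if (prot & PROT_EXEC) {
            flags |= PTE_SX;
            if (!su)
                flags |= PTE_UX;
        }
        return (flags);
    }

    int
    main(void)
    {
        printf("user rwx: 0x%03x\n", pte_flags_for(PROT_WRITE | PROT_EXEC, 0));
        printf("kern rw : 0x%03x\n", pte_flags_for(PROT_WRITE, 1));
        return (0);
    }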
*/ if ((pmap_flags & PMAP_ENTER_WIRED) != 0) flags |= PTE_WIRED; error = pte_enter(mmu, pmap, m, va, flags, (pmap_flags & PMAP_ENTER_NOSLEEP) != 0); if (error != 0) return (KERN_RESOURCE_SHORTAGE); if ((flags & PMAP_ENTER_WIRED) != 0) pmap->pm_stats.wired_count++; /* Flush the real memory from the instruction cache. */ if (prot & VM_PROT_EXECUTE) sync++; } if (sync && (su || pmap == PCPU_GET(curpmap))) { __syncicache((void *)va, PAGE_SIZE); sync = 0; } return (KERN_SUCCESS); } /* * Maps a sequence of resident pages belonging to the same object. * The sequence begins with the given page m_start. This page is * mapped at the given virtual address start. Each subsequent page is * mapped at a virtual address that is offset from start by the same * amount as the page is offset from m_start within the object. The * last page in the sequence is the page with the largest offset from * m_start that can be mapped at a virtual address less than the given * virtual address end. Not every virtual page between start and end * is mapped; only those for which a resident page exists with the * corresponding offset from m_start are mapped. */ static void mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { vm_page_t m; vm_pindex_t diff, psize; VM_OBJECT_ASSERT_LOCKED(m_start->object); psize = atop(end - start); m = m_start; rw_wlock(&pvh_global_lock); PMAP_LOCK(pmap); while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0); m = TAILQ_NEXT(m, listq); } rw_wunlock(&pvh_global_lock); PMAP_UNLOCK(pmap); } static void mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) { rw_wlock(&pvh_global_lock); PMAP_LOCK(pmap); mmu_booke_enter_locked(mmu, pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0); rw_wunlock(&pvh_global_lock); PMAP_UNLOCK(pmap); } /* * Remove the given range of addresses from the specified map. * * It is assumed that the start and end are properly rounded to the page size. */ static void mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) { pte_t *pte; uint8_t hold_flag; int su = (pmap == kernel_pmap); //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); if (su) { KASSERT(((va >= virtual_avail) && (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_remove: kernel pmap, non kernel va")); } else { KASSERT((va <= VM_MAXUSER_ADDRESS), ("mmu_booke_remove: user pmap, non user va")); } if (PMAP_REMOVE_DONE(pmap)) { //debugf("mmu_booke_remove: e (empty)\n"); return; } hold_flag = PTBL_HOLD_FLAG(pmap); //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); rw_wlock(&pvh_global_lock); PMAP_LOCK(pmap); for (; va < endva; va += PAGE_SIZE) { pte = pte_find(mmu, pmap, va); if ((pte != NULL) && PTE_ISVALID(pte)) pte_remove(mmu, pmap, va, hold_flag); } PMAP_UNLOCK(pmap); rw_wunlock(&pvh_global_lock); //debugf("mmu_booke_remove: e\n"); } /* * Remove physical page from all pmaps in which it resides. 
*/ static void mmu_booke_remove_all(mmu_t mmu, vm_page_t m) { pv_entry_t pv, pvn; uint8_t hold_flag; rw_wlock(&pvh_global_lock); for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { pvn = TAILQ_NEXT(pv, pv_link); PMAP_LOCK(pv->pv_pmap); hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); PMAP_UNLOCK(pv->pv_pmap); } vm_page_aflag_clear(m, PGA_WRITEABLE); rw_wunlock(&pvh_global_lock); } /* * Map a range of physical addresses into kernel virtual address space. */ static vm_offset_t mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start, vm_paddr_t pa_end, int prot) { vm_offset_t sva = *virt; vm_offset_t va = sva; //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n", // sva, pa_start, pa_end); while (pa_start < pa_end) { mmu_booke_kenter(mmu, va, pa_start); va += PAGE_SIZE; pa_start += PAGE_SIZE; } *virt = va; //debugf("mmu_booke_map: e (va = 0x%08x)\n", va); return (sva); } /* * The pmap must be activated before it's address space can be accessed in any * way. */ static void mmu_booke_activate(mmu_t mmu, struct thread *td) { pmap_t pmap; u_int cpuid; pmap = &td->td_proc->p_vmspace->vm_pmap; CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); sched_pin(); cpuid = PCPU_GET(cpuid); CPU_SET_ATOMIC(cpuid, &pmap->pm_active); PCPU_SET(curpmap, pmap); if (pmap->pm_tid[cpuid] == TID_NONE) tid_alloc(pmap); /* Load PID0 register with pmap tid value. */ mtspr(SPR_PID0, pmap->pm_tid[cpuid]); __asm __volatile("isync"); mtspr(SPR_DBCR0, td->td_pcb->pcb_cpu.booke.dbcr0); sched_unpin(); CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); } /* * Deactivate the specified process's address space. */ static void mmu_booke_deactivate(mmu_t mmu, struct thread *td) { pmap_t pmap; pmap = &td->td_proc->p_vmspace->vm_pmap; CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); td->td_pcb->pcb_cpu.booke.dbcr0 = mfspr(SPR_DBCR0); CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active); PCPU_SET(curpmap, NULL); } /* * Copy the range specified by src_addr/len * from the source map to the range dst_addr/len * in the destination map. * * This routine is only advisory and need not do anything. */ static void mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { } /* * Set the physical protection on the specified range of this map as requested. */ static void mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { vm_offset_t va; vm_page_t m; pte_t *pte; if ((prot & VM_PROT_READ) == VM_PROT_NONE) { mmu_booke_remove(mmu, pmap, sva, eva); return; } if (prot & VM_PROT_WRITE) return; PMAP_LOCK(pmap); for (va = sva; va < eva; va += PAGE_SIZE) { if ((pte = pte_find(mmu, pmap, va)) != NULL) { if (PTE_ISVALID(pte)) { m = PHYS_TO_VM_PAGE(PTE_PA(pte)); mtx_lock_spin(&tlbivax_mutex); tlb_miss_lock(); /* Handle modified pages. */ if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte)) vm_page_dirty(m); tlb0_flush_entry(va); pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); tlb_miss_unlock(); mtx_unlock_spin(&tlbivax_mutex); } } } PMAP_UNLOCK(pmap); } /* * Clear the write and modified bits in each of the given page's mappings. 
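mmu_booke_remove_all() walks the page's pv list saving the next pointer before each removal, because pte_remove() unlinks the current entry. The same idiom reduced to a plain singly linked list (the node layout here is illustrative, not the kernel's pv_entry):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int val; struct node *next; };

    /* Remove every node, reading 'next' before the current node is freed. */
    static void
    remove_all(struct node **head)
    {
        struct node *cur, *next;

        for (cur = *head; cur != NULL; cur = next) {
            next = cur->next;   /* must be saved before cur goes away */
            free(cur);
        }
        *head = NULL;
    }

    int
    main(void)
    {
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->val = i;
            n->next = head;
            head = n;
        }
        remove_all(&head);
        printf("list emptied: %s\n", head == NULL ? "yes" : "no");
        return (0);
    }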
*/ static void mmu_booke_remove_write(mmu_t mmu, vm_page_t m) { pv_entry_t pv; pte_t *pte; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("mmu_booke_remove_write: page %p is not managed", m)); /* * If the page is not exclusive busied, then PGA_WRITEABLE cannot be * set by another thread while the object is locked. Thus, * if PGA_WRITEABLE is clear, no page table entries need updating. */ VM_OBJECT_ASSERT_WLOCKED(m->object); if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) return; rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { PMAP_LOCK(pv->pv_pmap); if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { if (PTE_ISVALID(pte)) { m = PHYS_TO_VM_PAGE(PTE_PA(pte)); mtx_lock_spin(&tlbivax_mutex); tlb_miss_lock(); /* Handle modified pages. */ if (PTE_ISMODIFIED(pte)) vm_page_dirty(m); /* Flush mapping from TLB0. */ pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); tlb_miss_unlock(); mtx_unlock_spin(&tlbivax_mutex); } } PMAP_UNLOCK(pv->pv_pmap); } vm_page_aflag_clear(m, PGA_WRITEABLE); rw_wunlock(&pvh_global_lock); } static void mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) { pte_t *pte; pmap_t pmap; vm_page_t m; vm_offset_t addr; vm_paddr_t pa = 0; int active, valid; va = trunc_page(va); sz = round_page(sz); rw_wlock(&pvh_global_lock); pmap = PCPU_GET(curpmap); active = (pm == kernel_pmap || pm == pmap) ? 1 : 0; while (sz > 0) { PMAP_LOCK(pm); pte = pte_find(mmu, pm, va); valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0; if (valid) pa = PTE_PA(pte); PMAP_UNLOCK(pm); if (valid) { if (!active) { /* Create a mapping in the active pmap. */ addr = 0; m = PHYS_TO_VM_PAGE(pa); PMAP_LOCK(pmap); pte_enter(mmu, pmap, m, addr, PTE_SR | PTE_VALID | PTE_UR, FALSE); __syncicache((void *)addr, PAGE_SIZE); pte_remove(mmu, pmap, addr, PTBL_UNHOLD); PMAP_UNLOCK(pmap); } else __syncicache((void *)va, PAGE_SIZE); } va += PAGE_SIZE; sz -= PAGE_SIZE; } rw_wunlock(&pvh_global_lock); } /* * Atomically extract and hold the physical page with the given * pmap and virtual address pair if that mapping permits the given * protection. */ static vm_page_t mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) { pte_t *pte; vm_page_t m; uint32_t pte_wbit; vm_paddr_t pa; m = NULL; pa = 0; PMAP_LOCK(pmap); retry: pte = pte_find(mmu, pmap, va); if ((pte != NULL) && PTE_ISVALID(pte)) { if (pmap == kernel_pmap) pte_wbit = PTE_SW; else pte_wbit = PTE_UW; if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa)) goto retry; m = PHYS_TO_VM_PAGE(PTE_PA(pte)); vm_page_hold(m); } } PA_UNLOCK_COND(pa); PMAP_UNLOCK(pmap); return (m); } /* * Initialize a vm_page's machine-dependent fields. */ static void mmu_booke_page_init(mmu_t mmu, vm_page_t m) { TAILQ_INIT(&m->md.pv_list); } /* * mmu_booke_zero_page_area zeros the specified hardware page by * mapping it into virtual memory and using bzero to clear * its contents. * * off and size must reside within a single page. */ static void mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) { vm_offset_t va; /* XXX KASSERT off and size are within a single page? */ mtx_lock(&zero_page_mutex); va = zero_page_va; mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); bzero((caddr_t)va + off, size); mmu_booke_kremove(mmu, va); mtx_unlock(&zero_page_mutex); } /* * mmu_booke_zero_page zeros the specified hardware page. 
*/ static void mmu_booke_zero_page(mmu_t mmu, vm_page_t m) { mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE); } /* * mmu_booke_copy_page copies the specified (machine independent) page by * mapping the page into virtual memory and using memcopy to copy the page, * one machine dependent page at a time. */ static void mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm) { vm_offset_t sva, dva; sva = copy_page_src_va; dva = copy_page_dst_va; mtx_lock(©_page_mutex); mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm)); mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm)); memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE); mmu_booke_kremove(mmu, dva); mmu_booke_kremove(mmu, sva); mtx_unlock(©_page_mutex); } static inline void mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, vm_page_t *mb, vm_offset_t b_offset, int xfersize) { void *a_cp, *b_cp; vm_offset_t a_pg_offset, b_pg_offset; int cnt; mtx_lock(©_page_mutex); while (xfersize > 0) { a_pg_offset = a_offset & PAGE_MASK; cnt = min(xfersize, PAGE_SIZE - a_pg_offset); mmu_booke_kenter(mmu, copy_page_src_va, VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])); a_cp = (char *)copy_page_src_va + a_pg_offset; b_pg_offset = b_offset & PAGE_MASK; cnt = min(cnt, PAGE_SIZE - b_pg_offset); mmu_booke_kenter(mmu, copy_page_dst_va, VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])); b_cp = (char *)copy_page_dst_va + b_pg_offset; bcopy(a_cp, b_cp, cnt); mmu_booke_kremove(mmu, copy_page_dst_va); mmu_booke_kremove(mmu, copy_page_src_va); a_offset += cnt; b_offset += cnt; xfersize -= cnt; } mtx_unlock(©_page_mutex); } /* * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it * into virtual memory and using bzero to clear its contents. This is intended * to be called from the vm_pagezero process only and outside of Giant. No * lock is required. */ static void mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m) { vm_offset_t va; va = zero_page_idle_va; mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); bzero((caddr_t)va, PAGE_SIZE); mmu_booke_kremove(mmu, va); } /* * Return whether or not the specified physical page was modified * in any of physical maps. */ static boolean_t mmu_booke_is_modified(mmu_t mmu, vm_page_t m) { pte_t *pte; pv_entry_t pv; boolean_t rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("mmu_booke_is_modified: page %p is not managed", m)); rv = FALSE; /* * If the page is not exclusive busied, then PGA_WRITEABLE cannot be * concurrently set while the object is locked. Thus, if PGA_WRITEABLE * is clear, no PTEs can be modified. */ VM_OBJECT_ASSERT_WLOCKED(m->object); if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) return (rv); rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { PMAP_LOCK(pv->pv_pmap); if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && PTE_ISVALID(pte)) { if (PTE_ISMODIFIED(pte)) rv = TRUE; } PMAP_UNLOCK(pv->pv_pmap); if (rv) break; } rw_wunlock(&pvh_global_lock); return (rv); } /* * Return whether or not the specified virtual address is eligible * for prefault. */ static boolean_t mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) { return (FALSE); } /* * Return whether or not the specified physical page was referenced * in any physical maps. 
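mmu_booke_copy_pages() steps through two page arrays whose offsets need not be page-aligned, so each iteration copies at most the smaller of the two remaining in-page spans. A user-space model of that chunking, with plain buffers standing in for the kenter/kremove temporary mappings:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE  4096u
    #define PAGE_MASK  (PAGE_SIZE - 1)

    /* Copy xfersize bytes between two arrays of pages at arbitrary offsets. */
    static void
    copy_pages(uint8_t **src_pages, size_t a_off,
        uint8_t **dst_pages, size_t b_off, size_t xfersize)
    {
        while (xfersize > 0) {
            size_t a_pg_off = a_off & PAGE_MASK;
            size_t b_pg_off = b_off & PAGE_MASK;
            size_t cnt = xfersize;

            if (cnt > PAGE_SIZE - a_pg_off)
                cnt = PAGE_SIZE - a_pg_off;
            if (cnt > PAGE_SIZE - b_pg_off)
                cnt = PAGE_SIZE - b_pg_off;

            memcpy(dst_pages[b_off / PAGE_SIZE] + b_pg_off,
                src_pages[a_off / PAGE_SIZE] + a_pg_off, cnt);

            a_off += cnt;
            b_off += cnt;
            xfersize -= cnt;
        }
    }

    int
    main(void)
    {
        static uint8_t s0[PAGE_SIZE], s1[PAGE_SIZE], d0[PAGE_SIZE], d1[PAGE_SIZE];
        uint8_t *src[] = { s0, s1 }, *dst[] = { d0, d1 };

        memset(s0, 'A', sizeof(s0));
        memset(s1, 'B', sizeof(s1));
        copy_pages(src, PAGE_SIZE - 8, dst, 4, 16); /* crosses a source page boundary */
        printf("%.16s\n", d0 + 4);                  /* AAAAAAAABBBBBBBB */
        return (0);
    }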
*/ static boolean_t mmu_booke_is_referenced(mmu_t mmu, vm_page_t m) { pte_t *pte; pv_entry_t pv; boolean_t rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("mmu_booke_is_referenced: page %p is not managed", m)); rv = FALSE; rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { PMAP_LOCK(pv->pv_pmap); if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && PTE_ISVALID(pte)) { if (PTE_ISREFERENCED(pte)) rv = TRUE; } PMAP_UNLOCK(pv->pv_pmap); if (rv) break; } rw_wunlock(&pvh_global_lock); return (rv); } /* * Clear the modify bits on the specified physical page. */ static void mmu_booke_clear_modify(mmu_t mmu, vm_page_t m) { pte_t *pte; pv_entry_t pv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("mmu_booke_clear_modify: page %p is not managed", m)); VM_OBJECT_ASSERT_WLOCKED(m->object); KASSERT(!vm_page_xbusied(m), ("mmu_booke_clear_modify: page %p is exclusive busied", m)); /* * If the page is not PG_AWRITEABLE, then no PTEs can be modified. * If the object containing the page is locked and the page is not * exclusive busied, then PG_AWRITEABLE cannot be concurrently set. */ if ((m->aflags & PGA_WRITEABLE) == 0) return; rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { PMAP_LOCK(pv->pv_pmap); if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && PTE_ISVALID(pte)) { mtx_lock_spin(&tlbivax_mutex); tlb_miss_lock(); if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) { tlb0_flush_entry(pv->pv_va); pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | PTE_REFERENCED); } tlb_miss_unlock(); mtx_unlock_spin(&tlbivax_mutex); } PMAP_UNLOCK(pv->pv_pmap); } rw_wunlock(&pvh_global_lock); } /* * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * XXX: The exact number of bits to check and clear is a matter that * should be tested and standardized at some point in the future for * optimal aging of shared pages. */ static int mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) { pte_t *pte; pv_entry_t pv; int count; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("mmu_booke_ts_referenced: page %p is not managed", m)); count = 0; rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { PMAP_LOCK(pv->pv_pmap); if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && PTE_ISVALID(pte)) { if (PTE_ISREFERENCED(pte)) { mtx_lock_spin(&tlbivax_mutex); tlb_miss_lock(); tlb0_flush_entry(pv->pv_va); pte->flags &= ~PTE_REFERENCED; tlb_miss_unlock(); mtx_unlock_spin(&tlbivax_mutex); if (++count > 4) { PMAP_UNLOCK(pv->pv_pmap); break; } } } PMAP_UNLOCK(pv->pv_pmap); } rw_wunlock(&pvh_global_lock); return (count); } /* * Clear the wired attribute from the mappings for the specified range of * addresses in the given pmap. Every valid mapping within that range must * have the wired attribute set. In contrast, invalid mappings cannot have * the wired attribute set, so they are ignored. * * The wired attribute of the page table entry is not a hardware feature, so * there is no need to invalidate any TLB entries. 
*/ static void mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { vm_offset_t va; pte_t *pte; PMAP_LOCK(pmap); for (va = sva; va < eva; va += PAGE_SIZE) { if ((pte = pte_find(mmu, pmap, va)) != NULL && PTE_ISVALID(pte)) { if (!PTE_ISWIRED(pte)) panic("mmu_booke_unwire: pte %p isn't wired", pte); pte->flags &= ~PTE_WIRED; pmap->pm_stats.wired_count--; } } PMAP_UNLOCK(pmap); } /* * Return true if the pmap's pv is one of the first 16 pvs linked to from this * page. This count may be changed upwards or downwards in the future; it is * only necessary that true be returned for a small subset of pmaps for proper * page aging. */ static boolean_t mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) { pv_entry_t pv; int loops; boolean_t rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("mmu_booke_page_exists_quick: page %p is not managed", m)); loops = 0; rv = FALSE; rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { if (pv->pv_pmap == pmap) { rv = TRUE; break; } if (++loops >= 16) break; } rw_wunlock(&pvh_global_lock); return (rv); } /* * Return the number of managed mappings to the given physical page that are * wired. */ static int mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) { pv_entry_t pv; pte_t *pte; int count = 0; if ((m->oflags & VPO_UNMANAGED) != 0) return (count); rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { PMAP_LOCK(pv->pv_pmap); if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) count++; PMAP_UNLOCK(pv->pv_pmap); } rw_wunlock(&pvh_global_lock); return (count); } static int mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) { int i; vm_offset_t va; /* * This currently does not work for entries that * overlap TLB1 entries. */ for (i = 0; i < tlb1_idx; i ++) { if (tlb1_iomapped(i, pa, size, &va) == 0) return (0); } return (EFAULT); } void mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va) { vm_paddr_t ppa; vm_offset_t ofs; vm_size_t gran; /* Minidumps are based on virtual memory addresses. */ if (do_minidump) { *va = (void *)pa; return; } /* Raw physical memory dumps don't have a virtual address. */ /* We always map a 256MB page at 256M. */ gran = 256 * 1024 * 1024; ppa = pa & ~(gran - 1); ofs = pa - ppa; *va = (void *)gran; tlb1_set_entry((vm_offset_t)va, ppa, gran, _TLB_ENTRY_IO); if (sz > (gran - ofs)) tlb1_set_entry((vm_offset_t)(va + gran), ppa + gran, gran, _TLB_ENTRY_IO); } void mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va) { vm_paddr_t ppa; vm_offset_t ofs; vm_size_t gran; /* Minidumps are based on virtual memory addresses. */ /* Nothing to do... */ if (do_minidump) return; /* Raw physical memory dumps don't have a virtual address. */ tlb1_idx--; tlb1[tlb1_idx].mas1 = 0; tlb1[tlb1_idx].mas2 = 0; tlb1[tlb1_idx].mas3 = 0; tlb1_write_entry(tlb1_idx); gran = 256 * 1024 * 1024; ppa = pa & ~(gran - 1); ofs = pa - ppa; if (sz > (gran - ofs)) { tlb1_idx--; tlb1[tlb1_idx].mas1 = 0; tlb1[tlb1_idx].mas2 = 0; tlb1[tlb1_idx].mas3 = 0; tlb1_write_entry(tlb1_idx); } } extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1]; void mmu_booke_scan_init(mmu_t mmu) { vm_offset_t va; pte_t *pte; int i; if (!do_minidump) { /* Initialize phys. segments for dumpsys(). 
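mmu_booke_dumpsys_map() rounds the physical address down to a 256 MB boundary and adds a second 256 MB window only when the requested range spills past the first one. The arithmetic in isolation (pure computation, no TLB programming):

    #include <stdint.h>
    #include <stdio.h>

    #define GRAN    (256u * 1024 * 1024)

    /* How many 256 MB windows does [pa, pa+sz) need, and where does the first start? */
    static unsigned
    dump_windows(uint32_t pa, uint32_t sz, uint32_t *first_base)
    {
        uint32_t ppa = pa & ~(GRAN - 1);    /* round down to 256 MB */
        uint32_t ofs = pa - ppa;

        *first_base = ppa;
        return (sz > GRAN - ofs ? 2 : 1);
    }

    int
    main(void)
    {
        uint32_t base;
        unsigned n = dump_windows(0x1ff00000, 0x00200000, &base);

        printf("base=0x%08x windows=%u\n", base, n); /* base=0x10000000 windows=2 */
        return (0);
    }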
*/ memset(&dump_map, 0, sizeof(dump_map)); mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions, &availmem_regions_sz); for (i = 0; i < physmem_regions_sz; i++) { dump_map[i].pa_start = physmem_regions[i].mr_start; dump_map[i].pa_size = physmem_regions[i].mr_size; } return; } /* Virtual segments for minidumps: */ memset(&dump_map, 0, sizeof(dump_map)); /* 1st: kernel .data and .bss. */ dump_map[0].pa_start = trunc_page((uintptr_t)_etext); dump_map[0].pa_size = round_page((uintptr_t)_end) - dump_map[0].pa_start; /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ dump_map[1].pa_start = data_start; dump_map[1].pa_size = data_end - data_start; /* 3rd: kernel VM. */ va = dump_map[1].pa_start + dump_map[1].pa_size; /* Find start of next chunk (from va). */ while (va < virtual_end) { /* Don't dump the buffer cache. */ if (va >= kmi.buffer_sva && va < kmi.buffer_eva) { va = kmi.buffer_eva; continue; } pte = pte_find(mmu, kernel_pmap, va); if (pte != NULL && PTE_ISVALID(pte)) break; va += PAGE_SIZE; } if (va < virtual_end) { dump_map[2].pa_start = va; va += PAGE_SIZE; /* Find last page in chunk. */ while (va < virtual_end) { /* Don't run into the buffer cache. */ if (va == kmi.buffer_sva) break; pte = pte_find(mmu, kernel_pmap, va); if (pte == NULL || !PTE_ISVALID(pte)) break; va += PAGE_SIZE; } dump_map[2].pa_size = va - dump_map[2].pa_start; } } /* * Map a set of physical memory pages into the kernel virtual address space. * Return a pointer to where it is mapped. This routine is intended to be used * for mapping device memory, NOT real memory. */ static void * mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size) { return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT)); } static void * mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma) { void *res; uintptr_t va; vm_size_t sz; int i; /* * Check if this is premapped in TLB1. Note: this should probably also * check whether a sequence of TLB1 entries exist that match the * requirement, but now only checks the easy case. */ if (ma == VM_MEMATTR_DEFAULT) { for (i = 0; i < tlb1_idx; i++) { if (!(tlb1[i].mas1 & MAS1_VALID)) continue; if (pa >= tlb1[i].phys && (pa + size) <= (tlb1[i].phys + tlb1[i].size)) return (void *)(tlb1[i].virt + (pa - tlb1[i].phys)); } } size = roundup(size, PAGE_SIZE); /* * We leave a hole for device direct mapping between the maximum user * address (0x8000000) and the minimum KVA address (0xc0000000). If * devices are in there, just map them 1:1. If not, map them to the * device mapping area about VM_MAX_KERNEL_ADDRESS. These mapped * addresses should be pulled from an allocator, but since we do not * ever free TLB1 entries, it is safe just to increment a counter. * Note that there isn't a lot of address space here (128 MB) and it * is not at all difficult to imagine running out, since that is a 4:1 * compression from the 0xc0000000 - 0xf0000000 address space that gets * mapped there. */ if (pa >= (VM_MAXUSER_ADDRESS + PAGE_SIZE) && (pa + size - 1) < VM_MIN_KERNEL_ADDRESS) va = pa; else va = atomic_fetchadd_int(&tlb1_map_base, size); res = (void *)va; do { sz = 1 << (ilog2(size) & ~1); if (bootverbose) printf("Wiring VA=%x to PA=%x (size=%x), " "using TLB1[%d]\n", va, pa, sz, tlb1_idx); tlb1_set_entry(va, pa, sz, tlb_calc_wimg(pa, ma)); size -= sz; pa += sz; va += sz; } while (size > 0); return (res); } /* * 'Unmap' a range mapped by mmu_booke_mapdev(). 
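mmu_booke_mapdev_attr() carves the requested size into chunks of "1 << (ilog2(size) & ~1)", the largest power of four not exceeding the remaining size, since TLB1 entries cover power-of-four regions. A standalone sketch of that decomposition, with a portable ilog2() substituted for the cntlzw-based one:

    #include <stdint.h>
    #include <stdio.h>

    /* Largest log2 such that 2^log <= num (num must be nonzero). */
    static unsigned
    ilog2(uint32_t num)
    {
        unsigned log = 0;

        while (num >>= 1)
            log++;
        return (log);
    }

    /* Print the power-of-four chunks a mapping of 'size' bytes would use. */
    static void
    chunk_mapping(uint32_t size)
    {
        while (size > 0) {
            uint32_t sz = 1u << (ilog2(size) & ~1u);    /* power of four */

            printf("chunk 0x%08x\n", sz);
            size -= sz;
        }
    }

    int
    main(void)
    {
        chunk_mapping(0x00500000);  /* 5 MB -> one 4 MB chunk + one 1 MB chunk */
        return (0);
    }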
*/ static void mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) { #ifdef SUPPORTS_SHRINKING_TLB1 vm_offset_t base, offset; /* * Unmap only if this is inside kernel virtual space. */ if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { base = trunc_page(va); offset = va & PAGE_MASK; size = roundup(offset + size, PAGE_SIZE); kva_free(base, size); } #endif } /* * mmu_booke_object_init_pt preloads the ptes for a given object into the * specified pmap. This eliminates the blast of soft faults on process startup * and immediately after an mmap. */ static void mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size) { VM_OBJECT_ASSERT_WLOCKED(object); KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, ("mmu_booke_object_init_pt: non-device object")); } /* * Perform the pmap work for mincore. */ static int mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) { /* XXX: this should be implemented at some point */ return (0); } /**************************************************************************/ /* TID handling */ /**************************************************************************/ /* * Allocate a TID. If necessary, steal one from someone else. * The new TID is flushed from the TLB before returning. */ static tlbtid_t tid_alloc(pmap_t pmap) { tlbtid_t tid; int thiscpu; KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap); thiscpu = PCPU_GET(cpuid); tid = PCPU_GET(tid_next); if (tid > TID_MAX) tid = TID_MIN; PCPU_SET(tid_next, tid + 1); /* If we are stealing TID then clear the relevant pmap's field */ if (tidbusy[thiscpu][tid] != NULL) { CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid); tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE; /* Flush all entries from TLB0 matching this TID. */ tid_flush(tid, tlb0_ways, tlb0_entries_per_way); } tidbusy[thiscpu][tid] = pmap; pmap->pm_tid[thiscpu] = tid; __asm __volatile("msync; isync"); CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid, PCPU_GET(tid_next)); return (tid); } /**************************************************************************/ /* TLB0 handling */ /**************************************************************************/ static void tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3, uint32_t mas7) { int as; char desc[3]; tlbtid_t tid; vm_size_t size; unsigned int tsize; desc[2] = '\0'; if (mas1 & MAS1_VALID) desc[0] = 'V'; else desc[0] = ' '; if (mas1 & MAS1_IPROT) desc[1] = 'P'; else desc[1] = ' '; as = (mas1 & MAS1_TS_MASK) ? 1 : 0; tid = MAS1_GETTID(mas1); tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; size = 0; if (tsize) size = tsize2size(tsize); debugf("%3d: (%s) [AS=%d] " "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x " "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n", i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7); } /* Convert TLB0 va and way number to tlb0[] table index. */ static inline unsigned int tlb0_tableidx(vm_offset_t va, unsigned int way) { unsigned int idx; idx = (way * TLB0_ENTRIES_PER_WAY); idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT; return (idx); } /* * Invalidate TLB0 entry. 
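tid_alloc() hands out address-space IDs round-robin per CPU and steals the slot's previous owner once the range wraps. A reduced model of that policy (the TID_MIN/TID_MAX values and the owners[] table are illustrative; the real code also flushes TLB0 entries matching the stolen TID):

    #include <stdio.h>

    #define TID_MIN  2              /* illustrative; low IDs reserved in the sketch */
    #define TID_MAX  7

    static int tid_next = TID_MIN;
    static int owners[TID_MAX + 1]; /* owners[tid] = pmap id, 0 = free */

    static int
    tid_alloc(int pmap_id)
    {
        int tid = tid_next;

        if (tid > TID_MAX)
            tid = TID_MIN;          /* wrap around and start stealing */
        tid_next = tid + 1;

        if (owners[tid] != 0)
            printf("stealing tid %d from pmap %d\n", tid, owners[tid]);
        owners[tid] = pmap_id;
        return (tid);
    }

    int
    main(void)
    {
        for (int p = 1; p <= 8; p++)
            printf("pmap %d -> tid %d\n", p, tid_alloc(p));
        return (0);
    }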
*/ static inline void tlb0_flush_entry(vm_offset_t va) { CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va); mtx_assert(&tlbivax_mutex, MA_OWNED); __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK)); __asm __volatile("isync; msync"); __asm __volatile("tlbsync; msync"); CTR1(KTR_PMAP, "%s: e", __func__); } /* Print out contents of the MAS registers for each TLB0 entry */ void tlb0_print_tlbentries(void) { uint32_t mas0, mas1, mas2, mas3, mas7; int entryidx, way, idx; debugf("TLB0 entries:\n"); for (way = 0; way < TLB0_WAYS; way ++) for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) { mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); mtspr(SPR_MAS0, mas0); __asm __volatile("isync"); mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT; mtspr(SPR_MAS2, mas2); __asm __volatile("isync; tlbre"); mas1 = mfspr(SPR_MAS1); mas2 = mfspr(SPR_MAS2); mas3 = mfspr(SPR_MAS3); mas7 = mfspr(SPR_MAS7); idx = tlb0_tableidx(mas2, way); tlb_print_entry(idx, mas1, mas2, mas3, mas7); } } /**************************************************************************/ /* TLB1 handling */ /**************************************************************************/ /* * TLB1 mapping notes: * * TLB1[0] Kernel text and data. * TLB1[1-15] Additional kernel text and data mappings (if required), PCI * windows, other devices mappings. */ /* * Write given entry to TLB1 hardware. * Use 32 bit pa, clear 4 high-order bits of RPN (mas7). */ static void tlb1_write_entry(unsigned int idx) { uint32_t mas0, mas7; //debugf("tlb1_write_entry: s\n"); /* Clear high order RPN bits */ mas7 = 0; /* Select entry */ mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx); //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0); mtspr(SPR_MAS0, mas0); __asm __volatile("isync"); mtspr(SPR_MAS1, tlb1[idx].mas1); __asm __volatile("isync"); mtspr(SPR_MAS2, tlb1[idx].mas2); __asm __volatile("isync"); mtspr(SPR_MAS3, tlb1[idx].mas3); __asm __volatile("isync"); mtspr(SPR_MAS7, mas7); __asm __volatile("isync; tlbwe; isync; msync"); //debugf("tlb1_write_entry: e\n"); } /* * Return the largest uint value log such that 2^log <= num. */ static unsigned int ilog2(unsigned int num) { int lz; __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num)); return (31 - lz); } /* * Convert TLB TSIZE value to mapped region size. */ static vm_size_t tsize2size(unsigned int tsize) { /* * size = 4^tsize KB * size = 4^tsize * 2^10 = 2^(2 * tsize - 10) */ return ((1 << (2 * tsize)) * 1024); } /* * Convert region size (must be power of 4) to TLB TSIZE value. */ static unsigned int size2tsize(vm_size_t size) { return (ilog2(size) / 2 - 5); } /* * Register permanent kernel mapping in TLB1. * * Entries are created starting from index 0 (current free entry is * kept in tlb1_idx) and are not supposed to be invalidated. */ static int tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size, uint32_t flags) { uint32_t ts, tid; int tsize, index; index = atomic_fetchadd_int(&tlb1_idx, 1); if (index >= TLB1_ENTRIES) { printf("tlb1_set_entry: TLB1 full!\n"); return (-1); } /* Convert size to TSIZE */ tsize = size2tsize(size); tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK; /* XXX TS is hard coded to 0 for now as we only use single address space */ ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK; /* * Atomicity is preserved by the atomic increment above since nothing * is ever removed from tlb1. 
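tsize2size() and size2tsize() are inverses for power-of-four sizes: a TSIZE of n encodes a 4^n KB region. A quick standalone round-trip check, again with a portable ilog2() in place of the cntlzw version:

    #include <stdint.h>
    #include <stdio.h>

    static unsigned
    ilog2(uint32_t num)
    {
        unsigned log = 0;

        while (num >>= 1)
            log++;
        return (log);
    }

    /* TSIZE n maps a 4^n KB region. */
    static uint32_t
    tsize2size(unsigned tsize)
    {
        return ((1u << (2 * tsize)) * 1024);
    }

    /* Inverse, valid for power-of-four sizes >= 4 KB. */
    static unsigned
    size2tsize(uint32_t size)
    {
        return (ilog2(size) / 2 - 5);
    }

    int
    main(void)
    {
        for (unsigned t = 1; t <= 9; t++)
            printf("tsize %u -> %u KB -> tsize %u\n",
                t, tsize2size(t) / 1024, size2tsize(tsize2size(t)));
        return (0);
    }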
*/ tlb1[index].phys = pa; tlb1[index].virt = va; tlb1[index].size = size; tlb1[index].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid; tlb1[index].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK); tlb1[index].mas2 = (va & MAS2_EPN_MASK) | flags; /* Set supervisor RWX permission bits */ tlb1[index].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX; tlb1_write_entry(index); /* * XXX in general TLB1 updates should be propagated between CPUs, * since current design assumes to have the same TLB1 set-up on all * cores. */ return (0); } /* * Map in contiguous RAM region into the TLB1 using maximum of * KERNEL_REGION_MAX_TLB_ENTRIES entries. * * If necessary round up last entry size and return total size * used by all allocated entries. */ vm_size_t tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size) { vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES]; vm_size_t mapped, pgsz, base, mask; int idx, nents; /* Round up to the next 1M */ size = (size + (1 << 20) - 1) & ~((1 << 20) - 1); mapped = 0; idx = 0; base = va; pgsz = 64*1024*1024; while (mapped < size) { while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) { while (pgsz > (size - mapped)) pgsz >>= 2; pgs[idx++] = pgsz; mapped += pgsz; } /* We under-map. Correct for this. */ if (mapped < size) { while (pgs[idx - 1] == pgsz) { idx--; mapped -= pgsz; } /* XXX We may increase beyond out starting point. */ pgsz <<= 2; pgs[idx++] = pgsz; mapped += pgsz; } } nents = idx; mask = pgs[0] - 1; /* Align address to the boundary */ if (va & mask) { va = (va + mask) & ~mask; pa = (pa + mask) & ~mask; } for (idx = 0; idx < nents; idx++) { pgsz = pgs[idx]; debugf("%u: %x -> %x, size=%x\n", idx, pa, va, pgsz); tlb1_set_entry(va, pa, pgsz, _TLB_ENTRY_MEM); pa += pgsz; va += pgsz; } mapped = (va - base); printf("mapped size 0x%08x (wasted space 0x%08x)\n", mapped, mapped - size); return (mapped); } /* * TLB1 initialization routine, to be called after the very first * assembler level setup done in locore.S. */ void tlb1_init() { uint32_t mas0, mas1, mas2, mas3; uint32_t tsz; u_int i; if (bootinfo != NULL && bootinfo[0] != 1) { tlb1_idx = *((uint16_t *)(bootinfo + 8)); } else tlb1_idx = 1; /* The first entry/entries are used to map the kernel. */ for (i = 0; i < tlb1_idx; i++) { mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); mtspr(SPR_MAS0, mas0); __asm __volatile("isync; tlbre"); mas1 = mfspr(SPR_MAS1); if ((mas1 & MAS1_VALID) == 0) continue; mas2 = mfspr(SPR_MAS2); mas3 = mfspr(SPR_MAS3); tlb1[i].mas1 = mas1; tlb1[i].mas2 = mfspr(SPR_MAS2); tlb1[i].mas3 = mas3; tlb1[i].virt = mas2 & MAS2_EPN_MASK; tlb1[i].phys = mas3 & MAS3_RPN; if (i == 0) kernload = mas3 & MAS3_RPN; tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; tlb1[i].size = (tsz > 0) ? 
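tlb1_mapin_region() rounds the region up to 1 MB and then greedily assigns power-of-four page sizes, starting at 64 MB and shrinking by factors of four until a chunk fits. The sketch below reproduces only that greedy pass; the correction that re-grows the last page size when entries run out is omitted, so this is an approximation of the real sizing logic.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ENTRIES 8       /* illustrative cap on TLB1 entries used */

    /* Greedy power-of-four page-size selection for mapping 'size' bytes. */
    static int
    pick_pages(uint32_t size, uint32_t pgs[MAX_ENTRIES])
    {
        uint32_t mapped = 0, pgsz = 64u * 1024 * 1024;
        int idx = 0;

        /* Round up to the next 1 MB, as the kernel does. */
        size = (size + (1u << 20) - 1) & ~((1u << 20) - 1);

        while (mapped < size && idx < MAX_ENTRIES) {
            while (pgsz > size - mapped)
                pgsz >>= 2;             /* shrink by powers of four */
            pgs[idx++] = pgsz;
            mapped += pgsz;
        }
        return (idx);
    }

    int
    main(void)
    {
        uint32_t pgs[MAX_ENTRIES];
        int n = pick_pages(80u * 1024 * 1024, pgs);     /* 80 MB */

        for (int i = 0; i < n; i++)
            printf("entry %d: %u MB\n", i, pgs[i] / (1024 * 1024));
        return (0);
    }

For 80 MB this yields one 64 MB entry followed by one 16 MB entry, which is why contiguous RAM regions normally consume only a handful of TLB1 slots.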
tsize2size(tsz) : 0; kernsize += tlb1[i].size; } #ifdef SMP bp_ntlb1s = tlb1_idx; #endif /* Purge the remaining entries */ for (i = tlb1_idx; i < TLB1_ENTRIES; i++) tlb1_write_entry(i); /* Setup TLB miss defaults */ set_mas4_defaults(); } vm_offset_t pmap_early_io_map(vm_paddr_t pa, vm_size_t size) { vm_paddr_t pa_base; vm_offset_t va, sz; int i; KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!")); for (i = 0; i < tlb1_idx; i++) { if (!(tlb1[i].mas1 & MAS1_VALID)) continue; if (pa >= tlb1[i].phys && (pa + size) <= (tlb1[i].phys + tlb1[i].size)) return (tlb1[i].virt + (pa - tlb1[i].phys)); } pa_base = trunc_page(pa); size = roundup(size + (pa - pa_base), PAGE_SIZE); tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1)); va = tlb1_map_base + (pa - pa_base); do { sz = 1 << (ilog2(size) & ~1); tlb1_set_entry(tlb1_map_base, pa_base, sz, _TLB_ENTRY_IO); size -= sz; pa_base += sz; tlb1_map_base += sz; } while (size > 0); #ifdef SMP bp_ntlb1s = tlb1_idx; #endif return (va); } /* * Setup MAS4 defaults. * These values are loaded to MAS0-2 on a TLB miss. */ static void set_mas4_defaults(void) { uint32_t mas4; /* Defaults: TLB0, PID0, TSIZED=4K */ mas4 = MAS4_TLBSELD0; mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK; #ifdef SMP mas4 |= MAS4_MD; #endif mtspr(SPR_MAS4, mas4); __asm __volatile("isync"); } /* * Print out contents of the MAS registers for each TLB1 entry */ void tlb1_print_tlbentries(void) { uint32_t mas0, mas1, mas2, mas3, mas7; int i; debugf("TLB1 entries:\n"); for (i = 0; i < TLB1_ENTRIES; i++) { mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); mtspr(SPR_MAS0, mas0); __asm __volatile("isync; tlbre"); mas1 = mfspr(SPR_MAS1); mas2 = mfspr(SPR_MAS2); mas3 = mfspr(SPR_MAS3); mas7 = mfspr(SPR_MAS7); tlb_print_entry(i, mas1, mas2, mas3, mas7); } } /* * Print out contents of the in-ram tlb1 table. */ void tlb1_print_entries(void) { int i; debugf("tlb1[] table entries:\n"); for (i = 0; i < TLB1_ENTRIES; i++) tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0); } /* * Return 0 if the physical IO range is encompassed by one of the * the TLB1 entries, otherwise return related error code. */ static int tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va) { uint32_t prot; vm_paddr_t pa_start; vm_paddr_t pa_end; unsigned int entry_tsize; vm_size_t entry_size; *va = (vm_offset_t)NULL; /* Skip invalid entries */ if (!(tlb1[i].mas1 & MAS1_VALID)) return (EINVAL); /* * The entry must be cache-inhibited, guarded, and r/w * so it can function as an i/o page */ prot = tlb1[i].mas2 & (MAS2_I | MAS2_G); if (prot != (MAS2_I | MAS2_G)) return (EPERM); prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW); if (prot != (MAS3_SR | MAS3_SW)) return (EPERM); /* The address should be within the entry range. */ entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize")); entry_size = tsize2size(entry_tsize); pa_start = tlb1[i].mas3 & MAS3_RPN; pa_end = pa_start + entry_size - 1; if ((pa < pa_start) || ((pa + size) > pa_end)) return (ERANGE); /* Return virtual address of this mapping. 
*/ *va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start); return (0); } Index: head/sys/powerpc/conf/GENERIC =================================================================== --- head/sys/powerpc/conf/GENERIC (revision 282263) +++ head/sys/powerpc/conf/GENERIC (revision 282264) @@ -1,214 +1,215 @@ # # GENERIC -- Generic kernel configuration file for FreeBSD/powerpc # # For more information on this file, please read the handbook section on # Kernel Configuration Files: # # http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../conf/NOTES and NOTES files. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. # # $FreeBSD$ cpu AIM ident GENERIC machine powerpc powerpc makeoptions DEBUG=-g #Build kernel with gdb(1) debug symbols makeoptions WITH_CTF=1 # Platform support options POWERMAC #NewWorld Apple PowerMacs options PSIM #GDB PSIM ppc simulator options MAMBO #IBM Mambo Full System Simulator options PSERIES #PAPR-compliant systems +options FDT options SCHED_ULE #ULE scheduler options PREEMPTION #Enable kernel thread preemption options INET #InterNETworking options INET6 #IPv6 communications protocols options SCTP #Stream Control Transmission Protocol options FFS #Berkeley Fast Filesystem options SOFTUPDATES #Enable FFS soft updates support options UFS_ACL #Support for access control lists options UFS_DIRHASH #Improve performance on big directories options UFS_GJOURNAL #Enable gjournal-based UFS journaling options QUOTA #Enable disk quotas for UFS options MD_ROOT #MD is a potential root device options NFSCL #Network Filesystem Client options NFSD #Network Filesystem Server options NFSLOCKD #Network Lock Manager options NFS_ROOT #NFS usable as root device options MSDOSFS #MSDOS Filesystem options CD9660 #ISO 9660 Filesystem options PROCFS #Process filesystem (requires PSEUDOFS) options PSEUDOFS #Pseudo-filesystem framework options GEOM_PART_APM #Apple Partition Maps. options GEOM_PART_GPT #GUID Partition Tables. options GEOM_LABEL #Provides labelization options COMPAT_FREEBSD4 #Keep this for a while options COMPAT_FREEBSD5 #Compatible with FreeBSD5 options COMPAT_FREEBSD6 #Compatible with FreeBSD6 options COMPAT_FREEBSD7 #Compatible with FreeBSD7 options COMPAT_FREEBSD9 # Compatible with FreeBSD9 options COMPAT_FREEBSD10 # Compatible with FreeBSD10 options SCSI_DELAY=5000 #Delay (in ms) before probing SCSI options KTRACE #ktrace(1) syscall trace support options STACK #stack(9) support options SYSVSHM #SYSV-style shared memory options SYSVMSG #SYSV-style message queues options SYSVSEM #SYSV-style semaphores options _KPOSIX_PRIORITY_SCHEDULING #Posix P1003_1B real-time extensions options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4) options AUDIT # Security event auditing options CAPABILITY_MODE # Capsicum capability mode options CAPABILITIES # Capsicum capabilities options MAC # TrustedBSD MAC Framework options KDTRACE_HOOKS # Kernel DTrace hooks options DDB_CTF # Kernel ELF linker loads CTF data options INCLUDE_CONFIG_FILE # Include this file in kernel # Debugging support. Always need this: options KDB # Enable kernel debugger support. options KDB_TRACE # Print a stack trace for a panic. 
# For full debugger support use (turn off in stable branch): options DDB #Support DDB #options DEADLKRES #Enable the deadlock resolver options INVARIANTS #Enable calls of extra sanity checking options INVARIANT_SUPPORT #Extra sanity checks of internal structures, required by INVARIANTS options WITNESS #Enable checks to detect deadlocks and cycles options WITNESS_SKIPSPIN #Don't run witness on spinlocks for speed options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones # Make an SMP-capable kernel by default options SMP # Symmetric MultiProcessor Kernel # CPU frequency control device cpufreq # Standard busses device pci device agp # ATA controllers device ahci # AHCI-compatible SATA controllers device ata # Legacy ATA/SATA controllers device mvs # Marvell 88SX50XX/88SX60XX/88SX70XX/SoC SATA device siis # SiliconImage SiI3124/SiI3132/SiI3531 SATA # SCSI Controllers device ahc # AHA2940 and onboard AIC7xxx devices options AHC_ALLOW_MEMIO # Attempt to use memory mapped I/O options AHC_REG_PRETTY_PRINT # Print register bitfields in debug # output. Adds ~128k to driver. device isp # Qlogic family device ispfw # Firmware module for Qlogic host adapters device mpt # LSI-Logic MPT-Fusion device mps # LSI-Logic MPT-Fusion 2 device sym # NCR/Symbios/LSI Logic 53C8XX/53C1010/53C1510D # ATA/SCSI peripherals device scbus # SCSI bus (required for ATA/SCSI) device da # Direct Access (disks) device sa # Sequential Access (tape etc) device cd # CD device pass # Passthrough device (direct ATA/SCSI access) # vt is the default console driver, resembling an SCO console device vt # Generic console driver (pulls in OF FB) device kbdmux # Serial (COM) ports device scc device uart device uart_z8530 # FireWire support device firewire # FireWire bus code device sbp # SCSI over FireWire (Requires scbus and da) device fwe # Ethernet over FireWire (non-standard!) # PCI Ethernet NICs that use the common MII bus controller code. device miibus # MII bus support device bge # Broadcom BCM570xx Gigabit Ethernet device bm # Apple BMAC Ethernet device gem # Sun GEM/Sun ERI/Apple GMAC device dc # DEC/Intel 21143 and various workalikes device fxp # Intel EtherExpress PRO/100B (82557, 82558) # Pseudo devices. device loop # Network loopback device random # Entropy device device ether # Ethernet support device vlan # 802.1Q VLAN support device tun # Packet tunnel. device md # Memory "disks" device ofwd # Open Firmware disks device gif # IPv6 and IPv4 tunneling device firmware # firmware assist module # The `bpf' device enables the Berkeley Packet Filter. # Be aware of the administrative consequences of enabling this! # Note that 'bpf' is required for DHCP. 
device bpf #Berkeley packet filter # USB support options USB_DEBUG # enable debug msgs device uhci # UHCI PCI->USB interface device ohci # OHCI PCI->USB interface device ehci # EHCI PCI->USB interface device usb # USB Bus (required) device uhid # "Human Interface Devices" device ukbd # Keyboard options KBD_INSTALL_CDEV # install a CDEV entry in /dev device ulpt # Printer device umass # Disks/Mass storage - Requires scbus and da0 device ums # Mouse device atp # Apple USB touchpad device urio # Diamond Rio 500 MP3 player # USB Ethernet device aue # ADMtek USB Ethernet device axe # ASIX Electronics USB Ethernet device cdce # Generic USB over Ethernet device cue # CATC USB Ethernet device kue # Kawasaki LSI USB Ethernet # Wireless NIC cards options IEEE80211_SUPPORT_MESH options AH_SUPPORT_AR5416 # Misc device iicbus # I2C bus code device kiic # Keywest I2C device ad7417 # PowerMac7,2 temperature sensor device adt746x # PowerBook5,8 temperature sensor device ds1631 # PowerMac11,2 temperature sensor device ds1775 # PowerMac7,2 temperature sensor device fcu # Apple Fan Control Unit device max6690 # PowerMac7,2 temperature sensor device powermac_nvram # Open Firmware configuration NVRAM device smu # Apple System Management Unit device adm1030 # Apple G4 MDD fan controller device atibl # ATI-based backlight driver for PowerBooks/iBooks device nvbl # nVidia-based backlight driver for PowerBooks/iBooks # ADB support device adb device cuda device pmu # Sound support device sound # Generic sound driver (required) device snd_ai2s # Apple I2S audio device snd_davbus # Apple DAVBUS audio device snd_uaudio # USB Audio Index: head/sys/powerpc/powerpc/machdep.c =================================================================== --- head/sys/powerpc/powerpc/machdep.c (nonexistent) +++ head/sys/powerpc/powerpc/machdep.c (revision 282264) @@ -0,0 +1,502 @@ +/*- + * Copyright (C) 1995, 1996 Wolfgang Solfrank. + * Copyright (C) 1995, 1996 TooLs GmbH. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by TooLs GmbH. + * 4. The name of TooLs GmbH may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/*- + * Copyright (C) 2001 Benno Rice + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; + * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * $NetBSD: machdep.c,v 1.74.2.1 2000/11/01 16:13:48 tv Exp $ + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_compat.h" +#include "opt_ddb.h" +#include "opt_kstack_pages.h" +#include "opt_platform.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#ifndef __powerpc64__ +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +int cold = 1; +#ifdef __powerpc64__ +int cacheline_size = 128; +#else +int cacheline_size = 32; +#endif +int hw_direct_map = 1; + +extern void *ap_pcpu; + +struct pcpu __pcpu[MAXCPU]; + +static struct trapframe frame0; + +char machine[] = "powerpc"; +SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, ""); + +static void cpu_startup(void *); +SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); + +SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size, + CTLFLAG_RD, &cacheline_size, 0, ""); + +uintptr_t powerpc_init(vm_offset_t, vm_offset_t, vm_offset_t, void *); + +long Maxmem = 0; +long realmem = 0; + +struct kva_md_info kmi; + +static void +cpu_startup(void *dummy) +{ + + /* + * Initialise the decrementer-based clock. + */ + decr_init(); + + /* + * Good {morning,afternoon,evening,night}. 
+ */ + cpu_setup(PCPU_GET(cpuid)); + +#ifdef PERFMON + perfmon_init(); +#endif + printf("real memory = %ld (%ld MB)\n", ptoa(physmem), + ptoa(physmem) / 1048576); + realmem = physmem; + + if (bootverbose) + printf("available KVA = %zd (%zd MB)\n", + virtual_end - virtual_avail, + (virtual_end - virtual_avail) / 1048576); + + /* + * Display any holes after the first chunk of extended memory. + */ + if (bootverbose) { + int indx; + + printf("Physical memory chunk(s):\n"); + for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { + vm_offset_t size1 = + phys_avail[indx + 1] - phys_avail[indx]; + + #ifdef __powerpc64__ + printf("0x%016lx - 0x%016lx, %ld bytes (%ld pages)\n", + #else + printf("0x%08x - 0x%08x, %d bytes (%ld pages)\n", + #endif + phys_avail[indx], phys_avail[indx + 1] - 1, size1, + size1 / PAGE_SIZE); + } + } + + vm_ksubmap_init(&kmi); + + printf("avail memory = %ld (%ld MB)\n", ptoa(vm_cnt.v_free_count), + ptoa(vm_cnt.v_free_count) / 1048576); + + /* + * Set up buffers, so they can be used to read disk labels. + */ + bufinit(); + vm_pager_bufferinit(); +} + +extern vm_offset_t __startkernel, __endkernel; +extern unsigned char __bss_start[]; +extern unsigned char __sbss_start[]; +extern unsigned char __sbss_end[]; +extern unsigned char _end[]; + +void aim_cpu_init(vm_offset_t toc); +void booke_cpu_init(void); + +uintptr_t +powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp) +{ + struct pcpu *pc; + vm_offset_t startkernel, endkernel; + void *kmdp; + char *env; +#ifdef DDB + vm_offset_t ksym_start; + vm_offset_t ksym_end; +#endif + + kmdp = NULL; + + /* First guess at start/end kernel positions */ + startkernel = __startkernel; + endkernel = __endkernel; + + /* Check for ePAPR loader, which puts a magic value into r6 */ + if (mdp == (void *)0x65504150) + mdp = NULL; + + /* + * Parse metadata if present and fetch parameters. Must be done + * before console is inited so cninit gets the right value of + * boothowto. + */ + if (mdp != NULL) { + preload_metadata = mdp; + kmdp = preload_search_by_type("elf kernel"); + if (kmdp != NULL) { + boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); + kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *); + endkernel = ulmax(endkernel, MD_FETCH(kmdp, + MODINFOMD_KERNEND, vm_offset_t)); +#ifdef DDB + ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t); + ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t); + db_fetch_ksymtab(ksym_start, ksym_end); +#endif + } + } else { + bzero(__sbss_start, __sbss_end - __sbss_start); + bzero(__bss_start, _end - __bss_start); + } +#ifdef BOOKE + tlb1_init(); +#endif + + /* Store boot environment state */ + OF_initial_setup((void *)fdt, NULL, (int (*)(void *))ofentry); + + /* + * Init params/tunables that can be overridden by the loader + */ + init_param1(); + + /* + * Start initializing proc0 and thread0. + */ + proc_linkup0(&proc0, &thread0); + thread0.td_frame = &frame0; + + /* + * Set up per-cpu data. + */ + pc = __pcpu; + pcpu_init(pc, 0, sizeof(struct pcpu)); + pc->pc_curthread = &thread0; +#ifdef __powerpc64__ + __asm __volatile("mr 13,%0" :: "r"(pc->pc_curthread)); +#else + __asm __volatile("mr 2,%0" :: "r"(pc->pc_curthread)); +#endif + pc->pc_cpuid = 0; + + __asm __volatile("mtsprg 0, %0" :: "r"(pc)); + + /* + * Init mutexes, which we use heavily in PMAP + */ + + mutex_init(); + + /* + * Install the OF client interface + */ + + OF_bootstrap(); + + /* + * Initialize the console before printing anything. + */ + cninit(); + + /* + * Complain if there is no metadata. 
+ */ + if (mdp == NULL || kmdp == NULL) { + printf("powerpc_init: no loader metadata.\n"); + } + + /* + * Init KDB + */ + + kdb_init(); + +#ifdef AIM + aim_cpu_init(toc); +#else /* BOOKE */ + booke_cpu_init(); + + /* Make sure the kernel icache is valid before we go too much further */ + __syncicache((caddr_t)startkernel, endkernel - startkernel); +#endif + + /* + * Choose a platform module so we can get the physical memory map. + */ + + platform_probe_and_attach(); + + /* + * Bring up MMU + */ + pmap_bootstrap(startkernel, endkernel); + mtmsr(PSL_KERNSET & ~PSL_EE); + + /* + * Initialize params/tunables that are derived from memsize + */ + init_param2(physmem); + + /* + * Grab booted kernel's name + */ + env = kern_getenv("kernelname"); + if (env != NULL) { + strlcpy(kernelname, env, sizeof(kernelname)); + freeenv(env); + } + + /* + * Finish setting up thread0. + */ + thread0.td_pcb = (struct pcb *) + ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE - + sizeof(struct pcb)) & ~15UL); + bzero((void *)thread0.td_pcb, sizeof(struct pcb)); + pc->pc_curpcb = thread0.td_pcb; + + /* Initialise the message buffer. */ + msgbufinit(msgbufp, msgbufsize); + +#ifdef KDB + if (boothowto & RB_KDB) + kdb_enter(KDB_WHY_BOOTFLAGS, + "Boot flags requested debugger"); +#endif + + return (((uintptr_t)thread0.td_pcb - + (sizeof(struct callframe) - 3*sizeof(register_t))) & ~15UL); +} + +void +bzero(void *buf, size_t len) +{ + caddr_t p; + + p = buf; + + while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) { + *p++ = 0; + len--; + } + + while (len >= sizeof(u_long) * 8) { + *(u_long*) p = 0; + *((u_long*) p + 1) = 0; + *((u_long*) p + 2) = 0; + *((u_long*) p + 3) = 0; + len -= sizeof(u_long) * 8; + *((u_long*) p + 4) = 0; + *((u_long*) p + 5) = 0; + *((u_long*) p + 6) = 0; + *((u_long*) p + 7) = 0; + p += sizeof(u_long) * 8; + } + + while (len >= sizeof(u_long)) { + *(u_long*) p = 0; + len -= sizeof(u_long); + p += sizeof(u_long); + } + + while (len) { + *p++ = 0; + len--; + } +} + +/* + * Flush the D-cache for non-DMA I/O so that the I-cache can + * be made coherent later. + */ +void +cpu_flush_dcache(void *ptr, size_t len) +{ + register_t addr, off; + + /* + * Align the address to a cacheline and adjust the length + * accordingly. Then round the length to a multiple of the + * cacheline for easy looping. 
+ */ + addr = (uintptr_t)ptr; + off = addr & (cacheline_size - 1); + addr -= off; + len = (len + off + cacheline_size - 1) & ~(cacheline_size - 1); + + while (len > 0) { + __asm __volatile ("dcbf 0,%0" :: "r"(addr)); + __asm __volatile ("sync"); + addr += cacheline_size; + len -= cacheline_size; + } +} + +int +ptrace_set_pc(struct thread *td, unsigned long addr) +{ + struct trapframe *tf; + + tf = td->td_frame; + tf->srr0 = (register_t)addr; + + return (0); +} + +void +spinlock_enter(void) +{ + struct thread *td; + register_t msr; + + td = curthread; + if (td->td_md.md_spinlock_count == 0) { + __asm __volatile("or 2,2,2"); /* Set high thread priority */ + msr = intr_disable(); + td->td_md.md_spinlock_count = 1; + td->td_md.md_saved_msr = msr; + } else + td->td_md.md_spinlock_count++; + critical_enter(); +} + +void +spinlock_exit(void) +{ + struct thread *td; + register_t msr; + + td = curthread; + critical_exit(); + msr = td->td_md.md_saved_msr; + td->td_md.md_spinlock_count--; + if (td->td_md.md_spinlock_count == 0) { + intr_restore(msr); + __asm __volatile("or 6,6,6"); /* Set normal thread priority */ + } +} + Property changes on: head/sys/powerpc/powerpc/machdep.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/powerpc/powerpc/uma_machdep.c =================================================================== --- head/sys/powerpc/powerpc/uma_machdep.c (nonexistent) +++ head/sys/powerpc/powerpc/uma_machdep.c (revision 282264) @@ -0,0 +1,98 @@ +/*- + * Copyright (c) 2003 The FreeBSD Project + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int hw_uma_mdpages; +SYSCTL_INT(_hw, OID_AUTO, uma_mdpages, CTLFLAG_RD, &hw_uma_mdpages, 0, + "UMA MD pages in use"); + +void * +uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait) +{ + void *va; + vm_page_t m; + int pflags; + + *flags = UMA_SLAB_PRIV; + pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED; + + for (;;) { + m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ); + if (m == NULL) { + if (wait & M_NOWAIT) + return (NULL); + VM_WAIT; + } else + break; + } + + va = (void *) VM_PAGE_TO_PHYS(m); + + if (!hw_direct_map) + pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m)); + + if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) + bzero(va, PAGE_SIZE); + atomic_add_int(&hw_uma_mdpages, 1); + + return (va); +} + +void +uma_small_free(void *mem, vm_size_t size, u_int8_t flags) +{ + vm_page_t m; + + if (!hw_direct_map) + pmap_remove(kernel_pmap,(vm_offset_t)mem, + (vm_offset_t)mem + PAGE_SIZE); + + m = PHYS_TO_VM_PAGE((vm_offset_t)mem); + m->wire_count--; + vm_page_free(m); + atomic_subtract_int(&vm_cnt.v_wire_count, 1); + atomic_subtract_int(&hw_uma_mdpages, 1); +} Property changes on: head/sys/powerpc/powerpc/uma_machdep.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property
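
The align-and-round step at the top of cpu_flush_dcache() in the new machdep.c is easy to get wrong, so here is a minimal standalone sketch of the same arithmetic that can be compiled and run in userspace. It is not part of this change; the 128-byte line size mirrors the __powerpc64__ default set earlier in machdep.c, and the sample address and length are arbitrary illustrative values.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int
main(void)
{
	const size_t cacheline_size = 128;	/* assumed line size (__powerpc64__ default) */
	uintptr_t addr = 0x1234567;		/* arbitrary, unaligned buffer address */
	size_t len = 300;			/* arbitrary buffer length */

	/* Align the start address down to a cacheline boundary. */
	size_t off = addr & (cacheline_size - 1);
	addr -= off;

	/* Grow the length by the offset and round up to whole cachelines. */
	len = (len + off + cacheline_size - 1) & ~(cacheline_size - 1);

	/* cpu_flush_dcache() would now issue one dcbf per line in [addr, addr + len). */
	printf("flush %zu bytes (%zu lines) starting at %#lx\n",
	    len, len / cacheline_size, (unsigned long)addr);
	return (0);
}

Rounding len up only after adding the start offset guarantees that the final, possibly partial, cacheline is still covered when the buffer does not end on a line boundary.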