Index: head/sys/sparc64/sparc64/locore.S =================================================================== --- head/sys/sparc64/sparc64/locore.S (revision 89037) +++ head/sys/sparc64/sparc64/locore.S (revision 89038) @@ -1,84 +1,200 @@ /*- * Copyright (c) 2001 Jake Burkholder. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include +#include #include #include +#include #include "assym.s" .register %g2,#ignore .globl kernbase .set kernbase,KERNBASE /* - * void _start(caddr_t metadata, u_long o1, u_long o2, u_long o3, + * void _start(caddr_t metadata, u_int *state, u_int mid, u_int bootmid, * u_long ofw_vec) + * + * XXX: in an smp system the other cpus are started in the loader, but since + * there's no way to look up a symbol there, we need to use the same entry + * point. So if the module id is not equal to bootmid, jump to _mp_start. */ ENTRY(_start) - wrpr %g0, PSTATE_IE | PSTATE_PRIV | PSTATE_PEF, %pstate - mov %o0, %g1 - mov %o4, %g2 + /* + * Initialize misc state to known values. Interrupts disabled, normal + * globals, windows flushed (cr = 0, cs = nwindows - 1), no clean + * windows, pil 0, and floating point disabled. + */ + wrpr %g0, PSTATE_NORMAL, %pstate flushw - wrpr %g0, 1, %cwp wrpr %g0, 0, %cleanwin wrpr %g0, 0, %pil wr %g0, 0, %fprs - SET(kstack0 + KSTACK_PAGES * PAGE_SIZE - PCB_SIZEOF, %l0, %o0) - sub %o0, SPOFF + CCFSZ, %sp +#ifdef SMP + /* + * If we're not the boot processor, go do other stuff. + */ + cmp %o2, %o3 + be %xcc, 1f + nop + call _mp_start + nop + sir +1: +#endif - mov %g1, %o0 + /* + * Get onto our per-cpu panic stack, which precedes the struct pcpu in + * the per-cpu page. + */ + SET(pcpu0 + PAGE_SIZE - PC_SIZEOF, %l1, %l0) + sub %l0, SPOFF + CCFSZ, %sp + + /* + * Enable interrupts. + */ + wrpr %g0, PSTATE_KERNEL, %pstate + + /* + * Do initial bootstrap to set up pmap and thread0. + */ call sparc64_init - mov %g2, %o1 + nop + + /* + * Get onto thread0's kstack. + */ + sub PCB_REG, SPOFF + CCFSZ, %sp + + /* + * And away we go. This doesn't return. + */ call mi_startup nop + sir ! NOTREACHED END(_start) + +/* + * void cpu_setregs(struct pcpu *pc) + */ +ENTRY(cpu_setregs) + ldx [%o0 + PC_CURTHREAD], %o1 + ldx [%o1 + TD_PCB], %o1 + + /* + * Disable interrupts, normal globals. 
+ */ + wrpr %g0, PSTATE_NORMAL, %pstate + + /* + * Normal %g6 points to the current thread's pcb, and %g7 points to + * the per-cpu data structure. + */ + mov %o1, PCB_REG + mov %o0, PCPU_REG + + /* + * Alternate globals. + */ + wrpr %g0, PSTATE_ALT, %pstate + + /* + * Alternate %g5 points to a per-cpu panic stack, %g6 points to the + * current thread's pcb, and %g7 points to the per-cpu data structure. + */ + mov %o0, ASP_REG + mov %o1, PCB_REG + mov %o0, PCPU_REG + + /* + * Interrupt globals. + */ + wrpr %g0, PSTATE_INTR, %pstate + + /* + * Interrupt %g7 points to the per-cpu data structure. + */ + mov %o0, PCPU_REG + + /* + * MMU globals. + */ + wrpr %g0, PSTATE_MMU, %pstate + + /* + * MMU %g7 points to the user tsb. Initialize it to something sane + * here to catch invalid use. + */ + mov %g0, TSB_REG + + /* + * Normal globals again. + */ + wrpr %g0, PSTATE_NORMAL, %pstate + + /* + * Force trap level 1 and take over the trap table. + */ + SET(tl0_base, %o2, %o1) + wrpr %g0, 1, %tl + wrpr %o1, 0, %tba + + /* + * Re-enable interrupts. + */ + wrpr %g0, PSTATE_KERNEL, %pstate + + retl + nop +END(cpu_setregs) /* * Signal trampoline, copied out to user stack. Must be 16 byte aligned or * the argv and envp pointers can become misaligned. */ ENTRY(sigcode) call %o4 nop add %sp, SPOFF + CCFSZ + SF_UC, %o0 mov SYS_sigreturn, %g1 ta %xcc, 9 mov SYS_exit, %g1 ta %xcc, 9 illtrap .align 16 esigcode: END(sigcode) DATA(szsigcode) .long esigcode - sigcode
Index: head/sys/sparc64/sparc64/machdep.c =================================================================== --- head/sys/sparc64/sparc64/machdep.c (revision 89037) +++ head/sys/sparc64/sparc64/machdep.c (revision 89038) @@ -1,724 +1,682 @@ /*- * Copyright (c) 2001 Jake Burkholder. * Copyright (c) 1992 Terrence R. Lambert. * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 * from: FreeBSD: src/sys/i386/i386/machdep.c,v 1.477 2001/08/27 * $FreeBSD$ */ #include "opt_ddb.h" #include "opt_msgbuf.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MD_FETCH(mdp, info, type) ({ \ type *__p; \ __p = (type *)preload_search_info((mdp), MODINFO_METADATA | (info)); \ __p ? *__p : 0; \ }) typedef int ofw_vec_t(void *); -extern char tl0_base[]; - -extern char _end[]; - int physmem; int cold = 1; long dumplo; int Maxmem; -u_long debug_mask; - struct mtx Giant; struct mtx sched_lock; -static struct pcpu __pcpu; -/* - * This needs not be aligned as the other user areas, provided that process 0 - * does not have an fp state (which it doesn't normally). - * This constraint is only here for debugging. 
- */ -char kstack0[KSTACK_PAGES * PAGE_SIZE] __attribute__((aligned(64))); -static char uarea0[UAREA_PAGES * PAGE_SIZE] __attribute__((aligned(64))); -static struct trapframe frame0; -struct user *proc0uarea; -vm_offset_t proc0kstack; +char pcpu0[PAGE_SIZE]; +char uarea0[UAREA_PAGES * PAGE_SIZE]; +struct trapframe frame0; -char panic_stack[PANIC_STACK_PAGES * PAGE_SIZE]; +vm_offset_t kstack0; +vm_offset_t kstack0_phys; struct kva_md_info kmi; u_long ofw_vec; u_long ofw_tba; static struct timecounter tick_tc; static timecounter_get_t tick_get_timecount; -void sparc64_init(caddr_t mdp, ofw_vec_t *vec); +void sparc64_init(caddr_t mdp, u_int *state, u_int mid, u_int bootmid, + ofw_vec_t *vec); void sparc64_shutdown_final(void *dummy, int howto); static void cpu_startup(void *); SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); +CTASSERT(sizeof(struct pcpu) <= (PAGE_SIZE / 2)); + static void cpu_startup(void *arg) { phandle_t child; phandle_t root; char type[8]; u_int clock; root = OF_peer(0); for (child = OF_child(root); child != 0; child = OF_peer(child)) { OF_getprop(child, "device_type", type, sizeof(type)); if (strcmp(type, "cpu") == 0) break; } if (child == 0) panic("cpu_startup: no cpu\n"); OF_getprop(child, "clock-frequency", &clock, sizeof(clock)); tick_tc.tc_get_timecount = tick_get_timecount; tick_tc.tc_poll_pps = NULL; tick_tc.tc_counter_mask = ~0u; tick_tc.tc_frequency = clock; tick_tc.tc_name = "tick"; tc_init(&tick_tc); cpu_identify(clock); cache_init(child); vm_ksubmap_init(&kmi); bufinit(); vm_pager_bufferinit(); tick_start(clock, tick_hardclock); EVENTHANDLER_REGISTER(shutdown_final, sparc64_shutdown_final, NULL, SHUTDOWN_PRI_LAST); } void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { } unsigned tick_get_timecount(struct timecounter *tc) { return ((unsigned)rd(tick)); } void -sparc64_init(caddr_t mdp, ofw_vec_t *vec) +sparc64_init(caddr_t mdp, u_int *state, u_int mid, u_int bootmid, + ofw_vec_t *vec) { + struct pcpu *pc; vm_offset_t end; vm_offset_t off; caddr_t kmdp; - u_long ps; end = 0; kmdp = NULL; /* * Initialize openfirmware (needed for console). */ OF_init(vec); /* * Parse metadata if present and fetch parameters. Must be before the * console is inited so cninit gets the right value of boothowto. */ if (mdp != NULL) { preload_metadata = mdp; kmdp = preload_search_by_type("elf kernel"); if (kmdp != NULL) { boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *); end = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t); } } /* * Initialize the console before printing anything. */ cninit(); /* * Panic if there is no metadata. Most likely the kernel was booted * directly instead of through loader(8). */ if (mdp == NULL || kmdp == NULL) { printf("sparc64_init: no loader metadata.\n" "This probably means you are not using loader(8).\n"); panic("sparc64_init"); } /* * Sanity check the kernel end, which is important. */ if (end == 0) { printf("sparc64_init: warning, kernel end not specified.\n" "Attempting to continue anyway.\n"); end = (vm_offset_t)_end; } /* * XXX calculate physmem */ /* * Initialize tunables. */ init_param1(); init_param2(physmem); #ifdef DDB kdb_init(); #endif /* * Initialize virtual memory. */ pmap_bootstrap(end); /* * Disable tick for now. */ tick_stop(); - /* Initialize the interrupt tables. */ + /* + * Initialize the interrupt tables. + */ intr_init(); - proc_linkup(&proc0); /* * Initialize proc0 stuff (p_contested needs to be done early. 
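 *
 * For illustration (a sketch of the layout established below, not
 * additional code): the pcb is carved out of the top of the kernel
 * stack, i.e. for a thread "td"
 *
 *	td->td_pcb = (struct pcb *)(td->td_kstack +
 *	    KSTACK_PAGES * PAGE_SIZE) - 1;
 *
 * so the pcb occupies the last sizeof(struct pcb) bytes of the kstack
 * and the usable stack grows down from just beneath it.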
*/ - proc0uarea = (struct user *)uarea0; - proc0kstack = (vm_offset_t)kstack0; + proc_linkup(&proc0); proc0.p_md.md_utrap = NULL; - proc0.p_uarea = proc0uarea; + proc0.p_uarea = (struct user *)uarea0; proc0.p_stats = &proc0.p_uarea->u_stats; thread0 = &proc0.p_thread; - thread0->td_kstack = proc0kstack; + thread0->td_kstack = kstack0; thread0->td_pcb = (struct pcb *) (thread0->td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1; frame0.tf_tstate = TSTATE_IE | TSTATE_PEF; thread0->td_frame = &frame0; LIST_INIT(&thread0->td_contested); /* - * Force trap level 1 and take over the trap table. + * Prime our per-cpu data page for use. Note, we are using it for our + * stack, so don't pass the real size (PAGE_SIZE) to pcpu_init or + * it'll zero it out from under us. */ - ps = rdpr(pstate); - wrpr(pstate, ps, PSTATE_IE); - wrpr(tl, 0, 1); - wrpr(tba, tl0_base, 0); + pc = (struct pcpu *)(pcpu0 + PAGE_SIZE) - 1; + pcpu_init(pc, 0, sizeof(struct pcpu)); + pc->pc_curthread = thread0; + pc->pc_curpcb = thread0->td_pcb; + pc->pc_mid = mid; /* - * Setup preloaded globals. - * Normal %g7 points to the per-cpu data structure. + * Initialize global registers. */ - __asm __volatile("mov %0, %%g7" : : "r" (&__pcpu)); + cpu_setregs(pc); /* - * Alternate %g5 points to a per-cpu stack for saving alternate - * globals, %g6 points to the current thread's pcb, and %g7 points - * to the per-cpu data structure. - */ - wrpr(pstate, ps, PSTATE_AG | PSTATE_IE); - __asm __volatile("mov %0, %%g5" : : "r" - (&__pcpu.pc_alt_stack[ALT_STACK_SIZE - 1])); - __asm __volatile("mov %0, %%g6" : : "r" (thread0->td_pcb)); - __asm __volatile("mov %0, %%g7" : : "r" (&__pcpu)); - - /* - * Interrupt %g6 points to a per-cpu interrupt queue, %g7 points to - * the interrupt vector table. - */ - wrpr(pstate, ps, PSTATE_IG | PSTATE_IE); - __asm __volatile("mov %0, %%g6" : : "r" (&__pcpu.pc_iq)); - __asm __volatile("mov %0, %%g7" : : "r" (&intr_vectors)); - - /* - * MMU %g7 points to the user tsb. Initialize it to something sane - * here to catch invalid use. - */ - wrpr(pstate, ps, PSTATE_MG | PSTATE_IE); - __asm __volatile("mov %%g0, %%g7" : :); - - wrpr(pstate, ps, 0); - - /* * Map and initialize the message buffer (after setting trap table). */ for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE) pmap_kenter((vm_offset_t)msgbufp + off, msgbuf_phys + off); msgbufinit(msgbufp, MSGBUF_SIZE); - - /* - * Initialize curthread so that mutexes work. - */ - pcpu_init(pcpup, 0, sizeof(struct pcpu)); - PCPU_SET(curthread, thread0); - PCPU_SET(curpcb, thread0->td_pcb); /* * Initialize mutexes. */ mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE); mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE); mtx_init(&proc0.p_mtx, "process lock", MTX_DEF); mtx_lock(&Giant); } void set_openfirm_callback(ofw_vec_t *vec) { ofw_tba = rdpr(tba); ofw_vec = (u_long)vec; } void sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code) { struct trapframe *tf; struct sigframe *sfp; struct sigacts *psp; struct sigframe sf; struct thread *td; struct frame *fp; struct proc *p; int oonstack; u_long sp; oonstack = 0; td = curthread; p = td->td_proc; psp = p->p_sigacts; tf = td->td_frame; sp = tf->tf_sp + SPOFF; oonstack = sigonstack(sp); CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* Save user context. */ bzero(&sf, sizeof(sf)); sf.sf_uc.uc_sigmask = *mask; sf.sf_uc.uc_stack = p->p_sigstk; sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK) ? ((oonstack) ? 
SS_ONSTACK : 0) : SS_DISABLE; sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; bcopy(tf->tf_global, sf.sf_uc.uc_mcontext.mc_global, sizeof (tf->tf_global)); bcopy(tf->tf_out, sf.sf_uc.uc_mcontext.mc_out, sizeof (tf->tf_out)); sf.sf_uc.uc_mcontext.mc_tpc = tf->tf_tpc; sf.sf_uc.uc_mcontext.mc_tnpc = tf->tf_tnpc; sf.sf_uc.uc_mcontext.mc_tstate = tf->tf_tstate; /* Allocate and validate space for the signal handler context. */ if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { sfp = (struct sigframe *)(p->p_sigstk.ss_sp + p->p_sigstk.ss_size - sizeof(struct sigframe)); #if defined(COMPAT_43) || defined(COMPAT_SUNOS) p->p_sigstk.ss_flags |= SS_ONSTACK; #endif } else sfp = (struct sigframe *)sp - 1; PROC_UNLOCK(p); /* * grow_stack() will return 0 if *sfp does not fit inside the stack * and the stack can not be grown. * useracc() will return FALSE if access is denied. */ fp = (struct frame *)sfp - 1; if (vm_map_growstack(p, (u_long)fp) != KERN_SUCCESS || !useracc((caddr_t)fp, sizeof(*fp) + sizeof(*sfp), VM_PROT_WRITE)) { /* * Process has trashed its stack; give it an illegal * instruction to halt it in its tracks. */ CTR2(KTR_SIG, "sendsig: trashed stack td=%p sfp=%p", td, sfp); PROC_LOCK(p); SIGACTION(p, SIGILL) = SIG_DFL; SIGDELSET(p->p_sigignore, SIGILL); SIGDELSET(p->p_sigcatch, SIGILL); SIGDELSET(p->p_sigmask, SIGILL); psignal(p, SIGILL); return; } /* Translate the signal if appropriate. */ if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; /* Build the argument list for the signal handler. */ tf->tf_out[0] = sig; tf->tf_out[2] = (register_t)&sfp->sf_uc; tf->tf_out[3] = tf->tf_type; tf->tf_out[4] = (register_t)catcher; PROC_LOCK(p); if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { /* Signal handler installed with SA_SIGINFO. */ tf->tf_out[1] = (register_t)&sfp->sf_si; /* Fill siginfo structure. */ sf.sf_si.si_signo = sig; sf.sf_si.si_code = code; sf.sf_si.si_addr = (void *)tf->tf_sfar; } else { /* Old FreeBSD-style arguments. */ tf->tf_out[1] = code; } PROC_UNLOCK(p); /* Copy the sigframe out to the user's stack. */ if (rwindow_save(td) != 0 || copyout(&sf, sfp, sizeof(*sfp)) != 0 || suword(&fp->f_in[6], tf->tf_out[6]) != 0) { /* * Something is wrong with the stack pointer. * ...Kill the process. 
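 *
 * For illustration: in the common (non-sigaltstack) case the two
 * objects checked here are stacked directly below the interrupted
 * user stack pointer,
 *
 *	sfp = (struct sigframe *)sp - 1;	(signal context)
 *	fp = (struct frame *)sfp - 1;		(window save area)
 *
 * which is why the single useracc() check of sizeof(*fp) +
 * sizeof(*sfp) bytes covers both.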
*/ CTR2(KTR_SIG, "sendsig: sigexit td=%p sfp=%p", td, sfp); PROC_LOCK(p); sigexit(td, SIGILL); /* NOTREACHED */ } tf->tf_tpc = PS_STRINGS - *(p->p_sysent->sv_szsigcode); tf->tf_tnpc = tf->tf_tpc + 4; tf->tf_sp = (u_long)fp - SPOFF; CTR3(KTR_SIG, "sendsig: return td=%p pc=%#lx sp=%#lx", td, tf->tf_tpc, tf->tf_sp); PROC_LOCK(p); } #ifndef _SYS_SYSPROTO_H_ struct sigreturn_args { ucontext_t *ucp; }; #endif int sigreturn(struct thread *td, struct sigreturn_args *uap) { struct trapframe *tf; struct proc *p; ucontext_t uc; p = td->td_proc; if (rwindow_save(td)) { PROC_LOCK(p); sigexit(td, SIGILL); } CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp); if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) { CTR1(KTR_SIG, "sigreturn: efault td=%p", td); return (EFAULT); } if (((uc.uc_mcontext.mc_tpc | uc.uc_mcontext.mc_tnpc) & 3) != 0) return (EINVAL); if (!TSTATE_SECURE(uc.uc_mcontext.mc_tstate)) return (EINVAL); tf = td->td_frame; bcopy(uc.uc_mcontext.mc_global, tf->tf_global, sizeof(tf->tf_global)); bcopy(uc.uc_mcontext.mc_out, tf->tf_out, sizeof(tf->tf_out)); tf->tf_tpc = uc.uc_mcontext.mc_tpc; tf->tf_tnpc = uc.uc_mcontext.mc_tnpc; tf->tf_tstate = uc.uc_mcontext.mc_tstate; PROC_LOCK(p); #if defined(COMPAT_43) || defined(COMPAT_SUNOS) if (uc.uc_mcontext.mc_onstack & 1) p->p_sigstk.ss_flags |= SS_ONSTACK; else p->p_sigstk.ss_flags &= ~SS_ONSTACK; #endif p->p_sigmask = uc.uc_sigmask; SIG_CANTMASK(p->p_sigmask); PROC_UNLOCK(p); CTR4(KTR_SIG, "sigreturn: return td=%p pc=%#lx sp=%#lx tstate=%#lx", td, tf->tf_tpc, tf->tf_sp, tf->tf_tstate); return (EJUSTRETURN); } /* * Duplicate OF_exit() with a different firmware call function that restores * the trap table, otherwise a RED state exception is triggered in at least * some firmware versions. */ void cpu_halt(void) { static struct { cell_t name; cell_t nargs; cell_t nreturns; } args = { (cell_t)"exit", 0, 0 }; openfirmware_exit(&args); } void sparc64_shutdown_final(void *dummy, int howto) { static struct { cell_t name; cell_t nargs; cell_t nreturns; } args = { (cell_t)"SUNW,power-off", 0, 0 }; /* Turn the power off? 
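 *
 * For illustration, a hypothetical call: a power-down request
 * arrives as something like
 *
 *	sparc64_shutdown_final(NULL, RB_POWEROFF | RB_HALT);
 *
 * so the "SUNW,power-off" service is tried first and cpu_halt() is
 * only reached if that firmware call returns.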
*/ if ((howto & RB_POWEROFF) != 0) openfirmware_exit(&args); /* In case of halt, return to the firmware */ if ((howto & RB_HALT) != 0) cpu_halt(); } int ptrace_set_pc(struct thread *td, u_long addr) { td->td_frame->tf_tpc = addr; td->td_frame->tf_tnpc = addr + 4; return (0); } int ptrace_single_step(struct thread *td) { TODO; return (0); } void setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings) { struct trapframe *tf; struct md_utrap *ut; struct pcb *pcb; u_long sp; /* XXX no cpu_exec */ if ((ut = td->td_proc->p_md.md_utrap) != NULL) { ut->ut_refcnt--; if (ut->ut_refcnt == 0) free(ut, M_SUBPROC); td->td_proc->p_md.md_utrap = NULL; } pcb = td->td_pcb; sp = rounddown(stack, 16); tf = td->td_frame; fp_init_thread(td); bzero(pcb->pcb_rw, sizeof(pcb->pcb_rw)); bzero(pcb->pcb_rwsp, sizeof(pcb->pcb_rwsp)); pcb->pcb_nsaved = 0; bzero(tf, sizeof (*tf)); tf->tf_out[0] = stack; tf->tf_out[3] = PS_STRINGS; tf->tf_out[6] = sp - SPOFF - sizeof(struct frame); tf->tf_tnpc = entry + 4; tf->tf_tpc = entry; tf->tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_MM_TSO; td->td_retval[0] = tf->tf_out[0]; td->td_retval[1] = tf->tf_out[1]; } void Debugger(const char *msg) { printf("Debugger(\"%s\")\n", msg); critical_enter(); breakpoint(); critical_exit(); } int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *tf; tf = td->td_frame; bcopy(tf->tf_global, regs->r_global, sizeof(tf->tf_global)); bcopy(tf->tf_out, regs->r_out, sizeof(tf->tf_out)); regs->r_npc = tf->tf_tnpc; regs->r_pc = tf->tf_tpc; regs->r_tstate = tf->tf_tstate; regs->r_y = tf->tf_y; return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *tf; tf = td->td_frame; if (((regs->r_pc | regs->r_npc) & 3) != 0) return (EINVAL); if (!TSTATE_SECURE(regs->r_tstate)) return (EINVAL); bcopy(regs->r_global, tf->tf_global, sizeof(regs->r_global)); bcopy(regs->r_out, tf->tf_out, sizeof(regs->r_out)); tf->tf_tnpc = regs->r_npc; tf->tf_tpc = regs->r_pc; tf->tf_tstate = regs->r_tstate; tf->tf_y = regs->r_y; return (0); } int fill_dbregs(struct thread *td, struct dbreg *dbregs) { return (ENOSYS); } int set_dbregs(struct thread *td, struct dbreg *dbregs) { return (ENOSYS); } int fill_fpregs(struct thread *td, struct fpreg *fpregs) { struct trapframe *tf; struct pcb *pcb; pcb = td->td_pcb; tf = td->td_frame; bcopy(pcb->pcb_fpstate.fp_fb, fpregs->fr_regs, sizeof(pcb->pcb_fpstate.fp_fb)); fpregs->fr_fprs = tf->tf_fprs; fpregs->fr_fsr = tf->tf_fsr; return (0); } int set_fpregs(struct thread *td, struct fpreg *fpregs) { struct trapframe *tf; struct pcb *pcb; pcb = td->td_pcb; tf = td->td_frame; bcopy(fpregs->fr_regs, pcb->pcb_fpstate.fp_fb, sizeof(fpregs->fr_regs)); tf->tf_fprs = fpregs->fr_fprs; tf->tf_fsr = fpregs->fr_fsr; return (0); } Index: head/sys/sparc64/sparc64/pmap.c =================================================================== --- head/sys/sparc64/sparc64/pmap.c (revision 89037) +++ head/sys/sparc64/sparc64/pmap.c (revision 89038) @@ -1,1697 +1,1727 @@ /* * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 * $FreeBSD$ */ /* * Manages physical address maps. * * In addition to hardware address maps, this module is called upon to * provide software-use-only maps which may or may not be stored in the * same form as hardware maps. These pseudo-maps are used to store * intermediate results from copy operations to and from address spaces. * * Since the information managed by this module is also stored by the * logical address mapping module, this module may throw away valid virtual * to physical mappings at almost any time. However, invalidations of * mappings must be done as requested. * * In order to cope with hardware architectures which make virtual to * physical map invalidates expensive, this module may delay invalidate * or reduced protection operations until such time as they are actually * necessary. This module is given full information as to which processors * are currently using which maps, and to when physical maps must be made * correct. */ #include "opt_msgbuf.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PMAP_DEBUG #ifndef PMAP_SHPGPERPROC #define PMAP_SHPGPERPROC 200 #endif struct mem_region { vm_offset_t mr_start; vm_offset_t mr_size; }; struct ofw_map { vm_offset_t om_start; vm_offset_t om_size; u_long om_tte; }; /* * Virtual and physical address of message buffer. */ struct msgbuf *msgbufp; vm_offset_t msgbuf_phys; /* * Physical addresses of first and last available physical page. */ vm_offset_t avail_start; vm_offset_t avail_end; int pmap_pagedaemon_waken; /* * Map of physical memory regions. 
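 *
 * phys_avail[] holds zero-terminated {start, end} pairs; with two
 * hypothetical regions it would look like
 *
 *	phys_avail[0] = 0x2000;    phys_avail[1] = 0x4000000;
 *	phys_avail[2] = 0x8000000; phys_avail[3] = 0x10000000;
 *	phys_avail[4] = 0;         phys_avail[5] = 0;
 *
 * which is the form pmap_bootstrap() fills in and
 * pmap_bootstrap_alloc() iterates over below.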
*/ vm_offset_t phys_avail[128]; static struct mem_region mra[128]; static struct ofw_map translations[128]; static int translations_size; /* * First and last available kernel virtual addresses. */ vm_offset_t virtual_avail; vm_offset_t virtual_end; vm_offset_t kernel_vm_end; /* * The locked kernel page the kernel binary was loaded into. This will need * to become a list later. */ vm_offset_t kernel_page; /* * Kernel pmap. */ struct pmap kernel_pmap_store; /* * Map of free and in use hardware contexts and index of first potentially * free context. */ static char pmap_context_map[PMAP_CONTEXT_MAX]; static u_int pmap_context_base; /* * Virtual addresses of free space for temporary mappings. Used for copying, * zeroing and mapping physical pages for /dev/mem accesses. */ static vm_offset_t CADDR1; static vm_offset_t CADDR2; static boolean_t pmap_initialized = FALSE; /* Convert a tte data field into a page mask */ static vm_offset_t pmap_page_masks[] = { PAGE_MASK_8K, PAGE_MASK_64K, PAGE_MASK_512K, PAGE_MASK_4M }; #define PMAP_TD_GET_MASK(d) pmap_page_masks[TD_GET_SIZE((d))] /* * Allocate and free hardware context numbers. */ static u_int pmap_context_alloc(void); static void pmap_context_destroy(u_int i); /* * Allocate physical memory for use in pmap_bootstrap. */ static vm_offset_t pmap_bootstrap_alloc(vm_size_t size); /* * If a user pmap is processed with pmap_remove and the resident count * drops to 0, there are no more pages to remove, so we need not * continue. */ #define PMAP_REMOVE_DONE(pm) \ ((pm) != kernel_pmap && (pm)->pm_stats.resident_count == 0) /* * The threshold (in bytes) above which tsb_foreach() is used in pmap_remove() * and pmap_protect() instead of trying each virtual address. */ #define PMAP_TSB_THRESH ((TSB_SIZE / 2) * PAGE_SIZE) /* Callbacks for tsb_foreach. */ tsb_callback_t pmap_remove_tte; tsb_callback_t pmap_protect_tte; /* * Quick sort callout for comparing memory regions. */ static int mr_cmp(const void *a, const void *b); static int om_cmp(const void *a, const void *b); static int mr_cmp(const void *a, const void *b) { const struct mem_region *mra; const struct mem_region *mrb; mra = a; mrb = b; if (mra->mr_start < mrb->mr_start) return (-1); else if (mra->mr_start > mrb->mr_start) return (1); else return (0); } static int om_cmp(const void *a, const void *b) { const struct ofw_map *oma; const struct ofw_map *omb; oma = a; omb = b; if (oma->om_start < omb->om_start) return (-1); else if (oma->om_start > omb->om_start) return (1); else return (0); } /* * Bootstrap the system enough to run with virtual memory. */ void pmap_bootstrap(vm_offset_t ekva) { struct pmap *pm; struct tte tte; struct tte *tp; vm_offset_t off; vm_offset_t pa; vm_offset_t va; ihandle_t pmem; ihandle_t vmem; int sz; int i; int j; /* * Set the start and end of kva. The kernel is loaded at the first * available 4 meg super page, so round up to the end of the page. */ virtual_avail = roundup2(ekva, PAGE_SIZE_4M); virtual_end = VM_MAX_KERNEL_ADDRESS; /* Look up the page the kernel binary was loaded into. */ kernel_page = TD_GET_PA(ldxa(TLB_DAR_SLOT(TLB_SLOT_KERNEL), ASI_DTLB_DATA_ACCESS_REG)); /* * Find out what physical memory is available from the prom and * initialize the phys_avail array. This must be done before * pmap_bootstrap_alloc is called. 
*/ if ((pmem = OF_finddevice("/memory")) == -1) panic("pmap_bootstrap: finddevice /memory"); if ((sz = OF_getproplen(pmem, "available")) == -1) panic("pmap_bootstrap: getproplen /memory/available"); if (sizeof(phys_avail) < sz) panic("pmap_bootstrap: phys_avail too small"); if (sizeof(mra) < sz) panic("pmap_bootstrap: mra too small"); bzero(mra, sz); if (OF_getprop(pmem, "available", mra, sz) == -1) panic("pmap_bootstrap: getprop /memory/available"); sz /= sizeof(*mra); CTR0(KTR_PMAP, "pmap_bootstrap: physical memory"); qsort(mra, sz, sizeof (*mra), mr_cmp); for (i = 0, j = 0; i < sz; i++, j += 2) { CTR2(KTR_PMAP, "start=%#lx size=%#lx", mra[i].mr_start, mra[i].mr_size); phys_avail[j] = mra[i].mr_start; phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size; } /* * Allocate the kernel tsb and lock it in the tlb. */ pa = pmap_bootstrap_alloc(KVA_PAGES * PAGE_SIZE_4M); if (pa & PAGE_MASK_4M) panic("pmap_bootstrap: tsb unaligned\n"); tsb_kernel_phys = pa; tsb_kernel = (struct tte *)virtual_avail; virtual_avail += KVA_PAGES * PAGE_SIZE_4M; - for (i = 0; i < KVA_PAGES; i++) { - va = (vm_offset_t)tsb_kernel + i * PAGE_SIZE_4M; - tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va); - tte.tte_data = TD_V | TD_4M | TD_VA_LOW(va) | TD_PA(pa) | - TD_L | TD_CP | TD_CV | TD_P | TD_W; - tlb_store_slot(TLB_DTLB, va, TLB_CTX_KERNEL, tte, - TLB_SLOT_TSB_KERNEL_MIN + i); - } + pmap_map_tsb(); bzero(tsb_kernel, KVA_PAGES * PAGE_SIZE_4M); /* - * Load the tsb registers. + * Allocate a kernel stack with guard page for thread0 and map it into + * the kernel tsb. */ - stxa(AA_DMMU_TSB, ASI_DMMU, (vm_offset_t)tsb_kernel); - stxa(AA_IMMU_TSB, ASI_IMMU, (vm_offset_t)tsb_kernel); - membar(Sync); - flush(tsb_kernel); + pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE); + kstack0_phys = pa; + kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE); + virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE; + for (i = 0; i < KSTACK_PAGES; i++) { + pa = kstack0_phys + i * PAGE_SIZE; + va = kstack0 + i * PAGE_SIZE; + pmap_kenter(va, pa); + tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, va); + } /* * Allocate the message buffer. */ msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE); /* * Add the prom mappings to the kernel tsb. */ if ((vmem = OF_finddevice("/virtual-memory")) == -1) panic("pmap_bootstrap: finddevice /virtual-memory"); if ((sz = OF_getproplen(vmem, "translations")) == -1) panic("pmap_bootstrap: getproplen translations"); if (sizeof(translations) < sz) panic("pmap_bootstrap: translations too small"); bzero(translations, sz); if (OF_getprop(vmem, "translations", translations, sz) == -1) panic("pmap_bootstrap: getprop /virtual-memory/translations"); sz /= sizeof(*translations); translations_size = sz; CTR0(KTR_PMAP, "pmap_bootstrap: translations"); qsort(translations, sz, sizeof (*translations), om_cmp); for (i = 0; i < sz; i++) { CTR4(KTR_PMAP, "translation: start=%#lx size=%#lx tte=%#lx pa=%#lx", translations[i].om_start, translations[i].om_size, translations[i].om_tte, TD_GET_PA(translations[i].om_tte)); if (translations[i].om_start < 0xf0000000) /* XXX!!! */ continue; for (off = 0; off < translations[i].om_size; off += PAGE_SIZE) { va = translations[i].om_start + off; tte.tte_data = translations[i].om_tte + off; tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va); tp = tsb_kvtotte(va); CTR4(KTR_PMAP, "mapping: va=%#lx tp=%p tte=%#lx pa=%#lx", va, tp, tte.tte_data, TD_GET_PA(tte.tte_data)); *tp = tte; } } /* * Calculate the first and last available physical addresses. 
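 *
 * For illustration, with the hypothetical phys_avail[] contents shown
 * earlier the scan below stops at the zero terminator and yields
 *
 *	avail_start = 0x2000;		(phys_avail[0])
 *	avail_end = 0x10000000;		(end of the last pair)
 *
 * and Maxmem becomes that end address expressed in pages.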
*/ avail_start = phys_avail[0]; for (i = 0; phys_avail[i + 2] != 0; i += 2) ; avail_end = phys_avail[i + 1]; Maxmem = sparc64_btop(avail_end); /* * Allocate virtual address space for copying and zeroing pages of * physical memory. */ CADDR1 = virtual_avail; virtual_avail += PAGE_SIZE; CADDR2 = virtual_avail; virtual_avail += PAGE_SIZE; /* * Allocate virtual address space for the message buffer. */ msgbufp = (struct msgbuf *)virtual_avail; virtual_avail += round_page(MSGBUF_SIZE); /* * Initialize the kernel pmap (which is statically allocated). */ pm = kernel_pmap; pm->pm_context = TLB_CTX_KERNEL; pm->pm_active = ~0; pm->pm_count = 1; TAILQ_INIT(&pm->pm_pvlist); +} + +void +pmap_map_tsb(void) +{ + struct tte tte; + vm_offset_t va; + vm_offset_t pa; + int i; + + /* + * Map the 4mb tsb pages. + */ + for (i = 0; i < KVA_PAGES; i++) { + va = (vm_offset_t)tsb_kernel + i * PAGE_SIZE_4M; + pa = tsb_kernel_phys + i * PAGE_SIZE_4M; + tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va); + tte.tte_data = TD_V | TD_4M | TD_VA_LOW(va) | TD_PA(pa) | + TD_L | TD_CP | TD_CV | TD_P | TD_W; + tlb_store_slot(TLB_DTLB, va, TLB_CTX_KERNEL, tte, + TLB_SLOT_TSB_KERNEL_MIN + i); + } + + /* + * Load the tsb registers. + */ + stxa(AA_DMMU_TSB, ASI_DMMU, (vm_offset_t)tsb_kernel); + stxa(AA_IMMU_TSB, ASI_IMMU, (vm_offset_t)tsb_kernel); + membar(Sync); + flush(tsb_kernel); /* * Set the secondary context to be the kernel context (needed for * fp block operations in the kernel and the cache code). */ stxa(AA_DMMU_SCXR, ASI_DMMU, TLB_CTX_KERNEL); membar(Sync); } /* * Allocate a physical page of memory directly from the phys_avail map. * Can only be called from pmap_bootstrap before avail start and end are * calculated. */ static vm_offset_t pmap_bootstrap_alloc(vm_size_t size) { vm_offset_t pa; int i; size = round_page(size); for (i = 0; phys_avail[i + 1] != 0; i += 2) { if (phys_avail[i + 1] - phys_avail[i] < size) continue; pa = phys_avail[i]; phys_avail[i] += size; return (pa); } panic("pmap_bootstrap_alloc"); } /* * Initialize the pmap module. */ void pmap_init(vm_offset_t phys_start, vm_offset_t phys_end) { vm_offset_t addr; vm_size_t size; int result; int i; for (i = 0; i < vm_page_array_size; i++) { vm_page_t m; m = &vm_page_array[i]; TAILQ_INIT(&m->md.pv_list); m->md.pv_list_count = 0; } for (i = 0; i < translations_size; i++) { addr = translations[i].om_start; size = translations[i].om_size; if (addr < 0xf0000000) /* XXX */ continue; result = vm_map_find(kernel_map, NULL, 0, &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0); if (result != KERN_SUCCESS || addr != translations[i].om_start) panic("pmap_init: vm_map_find"); } pvzone = &pvzone_store; pvinit = (struct pv_entry *)kmem_alloc(kernel_map, vm_page_array_size * sizeof (struct pv_entry)); zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit, vm_page_array_size); pmap_initialized = TRUE; } /* * Initialize the address space (zone) for the pv_entries. Set a * high water mark so that the system can recover from excessive * numbers of pv entries. */ void pmap_init2(void) { int shpgperproc; shpgperproc = PMAP_SHPGPERPROC; TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); pv_entry_max = shpgperproc * maxproc + vm_page_array_size; pv_entry_high_water = 9 * (pv_entry_max / 10); zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1); } /* * Extract the physical page address associated with the given * map/virtual_address pair. 
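 *
 * An illustrative caller ("pm" and "va" are hypothetical):
 *
 *	vm_offset_t pa;
 *
 *	pa = pmap_extract(pm, va);
 *	if (pa == 0)
 *		(va has no valid mapping in pm)
 *
 * For the kernel pmap this is handled by pmap_kextract() below.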
*/ vm_offset_t pmap_extract(pmap_t pm, vm_offset_t va) { struct tte *tp; u_long d; if (pm == kernel_pmap) return (pmap_kextract(va)); tp = tsb_tte_lookup(pm, va); if (tp == NULL) return (0); else { d = tp->tte_data; return (TD_GET_PA(d) | (va & PMAP_TD_GET_MASK(d))); } } /* * Extract the physical page address associated with the given kernel virtual * address. */ vm_offset_t pmap_kextract(vm_offset_t va) { struct tte *tp; u_long d; if (va >= KERNBASE && va < KERNBASE + PAGE_SIZE_4M) return (kernel_page + (va & PAGE_MASK_4M)); tp = tsb_kvtotte(va); d = tp->tte_data; if ((d & TD_V) == 0) return (0); return (TD_GET_PA(d) | (va & PMAP_TD_GET_MASK(d))); } int pmap_cache_enter(vm_page_t m, vm_offset_t va) { struct tte *tp; vm_offset_t pa; pv_entry_t pv; int c; int i; CTR2(KTR_PMAP, "pmap_cache_enter: m=%p va=%#lx", m, va); for (i = 0, c = 0; i < DCACHE_COLORS; i++) { if (i != DCACHE_COLOR(va)) c += m->md.colors[i]; } m->md.colors[DCACHE_COLOR(va)]++; if (c == 0) { CTR0(KTR_PMAP, "pmap_cache_enter: cacheable"); return (1); } else if (c != 1) { CTR0(KTR_PMAP, "pmap_cache_enter: already uncacheable"); return (0); } CTR0(KTR_PMAP, "pmap_cache_enter: marking uncacheable"); if ((m->flags & PG_UNMANAGED) != 0) panic("pmap_cache_enter: non-managed page"); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { if ((tp = tsb_tte_lookup(pv->pv_pmap, pv->pv_va)) != NULL) { atomic_clear_long(&tp->tte_data, TD_CV); tlb_page_demap(TLB_DTLB | TLB_ITLB, pv->pv_pmap->pm_context, pv->pv_va); } } pa = VM_PAGE_TO_PHYS(m); dcache_inval_phys(pa, pa + PAGE_SIZE - 1); return (0); } void pmap_cache_remove(vm_page_t m, vm_offset_t va) { CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va, m->md.colors[DCACHE_COLOR(va)]); KASSERT(m->md.colors[DCACHE_COLOR(va)] > 0, ("pmap_cache_remove: no mappings %d <= 0", m->md.colors[DCACHE_COLOR(va)])); m->md.colors[DCACHE_COLOR(va)]--; } /* * Map a wired page into kernel virtual address space. */ void pmap_kenter(vm_offset_t va, vm_offset_t pa) { struct tte tte; struct tte *tp; tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va); tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) | TD_REF | TD_SW | TD_CP | TD_CV | TD_P | TD_W; tp = tsb_kvtotte(va); CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx", va, pa, tp, tp->tte_data); if ((tp->tte_data & TD_V) != 0) tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, va); *tp = tte; } /* * Map a wired page into kernel virtual address space. This additionally * takes a flag argument which is or'ed to the TTE data. This is used by * bus_space_map(). * NOTE: if the mapping is non-cacheable, it's the caller's responsibility * to flush entries that might still be in the cache, if applicable. */ void pmap_kenter_flags(vm_offset_t va, vm_offset_t pa, u_long flags) { struct tte tte; struct tte *tp; tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va); tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) | TD_REF | TD_P | flags; tp = tsb_kvtotte(va); CTR4(KTR_PMAP, "pmap_kenter_flags: va=%#lx pa=%#lx tp=%p data=%#lx", va, pa, tp, tp->tte_data); if ((tp->tte_data & TD_V) != 0) tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, va); *tp = tte; } /* * Make a temporary mapping for a physical address. This is only intended * to be used for panic dumps. */ void * pmap_kenter_temporary(vm_offset_t pa, int i) { TODO; } /* * Remove a wired page from kernel virtual address space. 
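 *
 * An illustrative pairing with pmap_kenter() above ("scratch_va" and
 * "pa" are hypothetical):
 *
 *	pmap_kenter(scratch_va, pa);	(enter wired mapping)
 *	(... access the page through scratch_va ...)
 *	pmap_kremove(scratch_va);	(clear the tte and demap)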
*/ void pmap_kremove(vm_offset_t va) { struct tte *tp; tp = tsb_kvtotte(va); CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp, tp->tte_data); atomic_clear_long(&tp->tte_data, TD_V); tp->tte_tag = 0; tp->tte_data = 0; tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, va); } /* * Map a range of physical addresses into kernel virtual address space. * * The value passed in *virt is a suggested virtual address for the mapping. * Architectures which can support a direct-mapped physical to virtual region * can return the appropriate address within that region, leaving '*virt' * unchanged. We cannot and therefore do not; *virt is updated with the * first usable address after the mapped region. */ vm_offset_t pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot) { vm_offset_t sva; vm_offset_t va; sva = *virt; va = sva; for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) pmap_kenter(va, pa_start); *virt = va; return (sva); } /* * Map a list of wired pages into kernel virtual address space. This is * intended for temporary mappings which do not need page modification or * references recorded. Existing mappings in the region are overwritten. */ void pmap_qenter(vm_offset_t va, vm_page_t *m, int count) { int i; for (i = 0; i < count; i++, va += PAGE_SIZE) pmap_kenter(va, VM_PAGE_TO_PHYS(m[i])); } /* * As above, but take an additional flags argument and call * pmap_kenter_flags(). */ void pmap_qenter_flags(vm_offset_t va, vm_page_t *m, int count, u_long fl) { int i; for (i = 0; i < count; i++, va += PAGE_SIZE) pmap_kenter_flags(va, VM_PAGE_TO_PHYS(m[i]), fl); } /* * Remove page mappings from kernel virtual address space. Intended for * temporary mappings entered by pmap_qenter. */ void pmap_qremove(vm_offset_t va, int count) { int i; for (i = 0; i < count; i++, va += PAGE_SIZE) pmap_kremove(va); } /* * Create the uarea for a new process. * This routine directly affects the fork perf for a process. */ void pmap_new_proc(struct proc *p) { vm_object_t upobj; vm_offset_t up; vm_page_t m; u_int i; /* * Allocate object for the upages. */ upobj = p->p_upages_obj; if (upobj == NULL) { upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES); p->p_upages_obj = upobj; } /* * Get a kernel virtual address for the U area for this process. */ up = (vm_offset_t)p->p_uarea; if (up == 0) { up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE); if (up == 0) panic("pmap_new_proc: upage allocation failed"); p->p_uarea = (struct user *)up; } for (i = 0; i < UAREA_PAGES; i++) { /* * Get a uarea page. */ m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); /* * Wire the page. */ m->wire_count++; cnt.v_wire_count++; /* * Enter the page into the kernel address space. */ pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m)); vm_page_wakeup(m); vm_page_flag_clear(m, PG_ZERO); vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); m->valid = VM_PAGE_BITS_ALL; } } /* * Dispose the uarea for a process that has exited. * This routine directly impacts the exit perf of a process. */ void pmap_dispose_proc(struct proc *p) { vm_object_t upobj; vm_offset_t up; vm_page_t m; int i; upobj = p->p_upages_obj; up = (vm_offset_t)p->p_uarea; for (i = 0; i < UAREA_PAGES; i++) { m = vm_page_lookup(upobj, i); if (m == NULL) panic("pmap_dispose_proc: upage already missing?"); vm_page_busy(m); pmap_kremove(up + i * PAGE_SIZE); vm_page_unwire(m, 0); vm_page_free(m); } } /* * Allow the uarea for a process to be prejudicially paged out. 
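 *
 * A sketch of the uarea life cycle implemented by these routines, for
 * a hypothetical process "p":
 *
 *	pmap_new_proc(p);	(wire and map UAREA_PAGES at p_uarea)
 *	pmap_swapout_proc(p);	(dirty, unwire and unmap each page)
 *	pmap_swapin_proc(p);	(grab, page in if invalid, rewire, remap)
 *	pmap_dispose_proc(p);	(unmap and free the pages at exit)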
*/ void pmap_swapout_proc(struct proc *p) { vm_object_t upobj; vm_offset_t up; vm_page_t m; int i; upobj = p->p_upages_obj; up = (vm_offset_t)p->p_uarea; for (i = 0; i < UAREA_PAGES; i++) { m = vm_page_lookup(upobj, i); if (m == NULL) panic("pmap_swapout_proc: upage already missing?"); vm_page_dirty(m); vm_page_unwire(m, 0); pmap_kremove(up + i * PAGE_SIZE); } } /* * Bring the uarea for a specified process back in. */ void pmap_swapin_proc(struct proc *p) { vm_object_t upobj; vm_offset_t up; vm_page_t m; int rv; int i; upobj = p->p_upages_obj; up = (vm_offset_t)p->p_uarea; for (i = 0; i < UAREA_PAGES; i++) { m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m)); if (m->valid != VM_PAGE_BITS_ALL) { rv = vm_pager_get_pages(upobj, &m, 1, 0); if (rv != VM_PAGER_OK) panic("pmap_swapin_proc: cannot get upage"); m = vm_page_lookup(upobj, i); m->valid = VM_PAGE_BITS_ALL; } vm_page_wire(m); vm_page_wakeup(m); vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); } } /* * Create the kernel stack and pcb for a new thread. * This routine directly affects the fork perf for a process and * create performance for a thread. */ void pmap_new_thread(struct thread *td) { vm_object_t ksobj; vm_offset_t ks; vm_page_t m; u_int i; /* * Allocate object for the kstack. */ ksobj = td->td_kstack_obj; if (ksobj == NULL) { ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES); td->td_kstack_obj = ksobj; } /* * Get a kernel virtual address for the kstack for this thread. */ ks = td->td_kstack; if (ks == 0) { ks = kmem_alloc_nofault(kernel_map, (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE); if (ks == 0) panic("pmap_new_thread: kstack allocation failed"); tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, ks); ks += KSTACK_GUARD_PAGES * PAGE_SIZE; td->td_kstack = ks; } for (i = 0; i < KSTACK_PAGES; i++) { /* * Get a kernel stack page. */ m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); /* * Wire the page. */ m->wire_count++; cnt.v_wire_count++; /* * Enter the page into the kernel address space. */ pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m)); vm_page_wakeup(m); vm_page_flag_clear(m, PG_ZERO); vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); m->valid = VM_PAGE_BITS_ALL; } } /* * Dispose the kernel stack for a thread that has exited. * This routine directly impacts the exit perf of a process and thread. */ void pmap_dispose_thread(struct thread *td) { vm_object_t ksobj; vm_offset_t ks; vm_page_t m; int i; ksobj = td->td_kstack_obj; ks = td->td_kstack; for (i = 0; i < KSTACK_PAGES; i++) { m = vm_page_lookup(ksobj, i); if (m == NULL) panic("pmap_dispose_thread: kstack already missing?"); vm_page_busy(m); pmap_kremove(ks + i * PAGE_SIZE); vm_page_unwire(m, 0); vm_page_free(m); } } /* * Allow the kernel stack for a thread to be prejudicially paged out. */ void pmap_swapout_thread(struct thread *td) { vm_object_t ksobj; vm_offset_t ks; vm_page_t m; int i; ksobj = td->td_kstack_obj; ks = (vm_offset_t)td->td_kstack; for (i = 0; i < KSTACK_PAGES; i++) { m = vm_page_lookup(ksobj, i); if (m == NULL) panic("pmap_swapout_thread: kstack already missing?"); vm_page_dirty(m); vm_page_unwire(m, 0); pmap_kremove(ks + i * PAGE_SIZE); } } /* * Bring the kernel stack for a specified thread back in. 
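 *
 * For illustration, the layout produced by pmap_new_thread() above:
 *
 *	ks = kmem_alloc_nofault(kernel_map,
 *	    (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
 *	td->td_kstack = ks + KSTACK_GUARD_PAGES * PAGE_SIZE;
 *
 * leaving the guard pages unmapped beneath td_kstack, so running off
 * the bottom of the stack faults instead of silently corrupting
 * adjacent memory.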
*/ void pmap_swapin_thread(struct thread *td) { vm_object_t ksobj; vm_offset_t ks; vm_page_t m; int rv; int i; ksobj = td->td_kstack_obj; ks = td->td_kstack; for (i = 0; i < KSTACK_PAGES; i++) { m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m)); if (m->valid != VM_PAGE_BITS_ALL) { rv = vm_pager_get_pages(ksobj, &m, 1, 0); if (rv != VM_PAGER_OK) panic("pmap_swapin_thread: cannot get kstack"); m = vm_page_lookup(ksobj, i); m->valid = VM_PAGE_BITS_ALL; } vm_page_wire(m); vm_page_wakeup(m); vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); } } /* * Initialize the pmap associated with process 0. */ void pmap_pinit0(pmap_t pm) { pm->pm_context = pmap_context_alloc(); pm->pm_active = 0; pm->pm_count = 1; pm->pm_tsb = NULL; pm->pm_tsb_obj = NULL; pm->pm_tsb_tte = NULL; TAILQ_INIT(&pm->pm_pvlist); bzero(&pm->pm_stats, sizeof(pm->pm_stats)); } /* * Initialize a preallocated and zeroed pmap structure, such as one in a * vmspace structure. */ void pmap_pinit(pmap_t pm) { vm_page_t m; /* * Allocate kva space for the tsb. */ if (pm->pm_tsb == NULL) { pm->pm_tsb = (struct tte *)kmem_alloc_pageable(kernel_map, PAGE_SIZE_8K); pm->pm_tsb_tte = tsb_kvtotte((vm_offset_t)pm->pm_tsb); } /* * Allocate an object for it. */ if (pm->pm_tsb_obj == NULL) pm->pm_tsb_obj = vm_object_allocate(OBJT_DEFAULT, 1); /* * Allocate the tsb page. */ m = vm_page_grab(pm->pm_tsb_obj, 0, VM_ALLOC_RETRY | VM_ALLOC_ZERO); if ((m->flags & PG_ZERO) == 0) pmap_zero_page(VM_PAGE_TO_PHYS(m)); m->wire_count++; cnt.v_wire_count++; vm_page_flag_clear(m, PG_MAPPED | PG_BUSY); m->valid = VM_PAGE_BITS_ALL; pmap_kenter((vm_offset_t)pm->pm_tsb, VM_PAGE_TO_PHYS(m)); pm->pm_active = 0; pm->pm_context = pmap_context_alloc(); pm->pm_count = 1; TAILQ_INIT(&pm->pm_pvlist); bzero(&pm->pm_stats, sizeof(pm->pm_stats)); } void pmap_pinit2(pmap_t pmap) { /* XXX: Remove this stub when no longer called */ } /* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. */ void pmap_release(pmap_t pm) { #ifdef INVARIANTS pv_entry_t pv; #endif vm_object_t obj; vm_page_t m; CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p", pm->pm_context, pm->pm_tsb); obj = pm->pm_tsb_obj; KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1")); #ifdef INVARIANTS if (!TAILQ_EMPTY(&pm->pm_pvlist)) { TAILQ_FOREACH(pv, &pm->pm_pvlist, pv_plist) { CTR3(KTR_PMAP, "pmap_release: m=%p va=%#lx pa=%#lx", pv->pv_m, pv->pv_va, VM_PAGE_TO_PHYS(pv->pv_m)); } panic("pmap_release: leaking pv entries"); } #endif KASSERT(pmap_resident_count(pm) == 0, ("pmap_release: resident pages %ld != 0", pmap_resident_count(pm))); m = TAILQ_FIRST(&obj->memq); pmap_context_destroy(pm->pm_context); if (vm_page_sleep_busy(m, FALSE, "pmaprl")) return; vm_page_busy(m); KASSERT(m->hold_count == 0, ("pmap_release: freeing held tsb page")); pmap_kremove((vm_offset_t)pm->pm_tsb); m->wire_count--; cnt.v_wire_count--; vm_page_free_zero(m); } /* * Grow the number of kernel page table entries. Unneeded. */ void pmap_growkernel(vm_offset_t addr) { } /* * Retire the given physical map from service. Pmaps are always allocated * as part of a larger structure, so this never happens. */ void pmap_destroy(pmap_t pm) { panic("pmap_destroy: unimplemented"); } /* * Add a reference to the specified pmap. 
*/ void pmap_reference(pmap_t pm) { if (pm != NULL) pm->pm_count++; } /* * This routine is very drastic, but can save the system * in a pinch. */ void pmap_collect(void) { static int warningdone; vm_page_t m; int i; if (pmap_pagedaemon_waken == 0) return; if (warningdone++ < 5) printf("pmap_collect: collecting pv entries -- suggest " "increasing PMAP_SHPGPERPROC\n"); for (i = 0; i < vm_page_array_size; i++) { m = &vm_page_array[i]; if (m->wire_count || m->hold_count || m->busy || (m->flags & (PG_BUSY | PG_UNMANAGED))) continue; pv_remove_all(m); } pmap_pagedaemon_waken = 0; } int pmap_remove_tte(struct pmap *pm, struct tte *tp, vm_offset_t va) { vm_page_t m; m = PHYS_TO_VM_PAGE(TD_GET_PA(tp->tte_data)); if ((tp->tte_data & TD_PV) != 0) { if ((tp->tte_data & TD_W) != 0 && pmap_track_modified(pm, va)) vm_page_dirty(m); if ((tp->tte_data & TD_REF) != 0) vm_page_flag_set(m, PG_REFERENCED); pv_remove(pm, m, va); pmap_cache_remove(m, va); } atomic_clear_long(&tp->tte_data, TD_V); tp->tte_tag = 0; tp->tte_data = 0; tlb_page_demap(TLB_ITLB | TLB_DTLB, pm->pm_context, va); if (PMAP_REMOVE_DONE(pm)) return (0); return (1); } /* * Remove the given range of addresses from the specified map. */ void pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end) { struct tte *tp; CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx", pm->pm_context, start, end); if (PMAP_REMOVE_DONE(pm)) return; if (end - start > PMAP_TSB_THRESH) tsb_foreach(pm, start, end, pmap_remove_tte); else { for (; start < end; start += PAGE_SIZE) { if ((tp = tsb_tte_lookup(pm, start)) != NULL) { if (!pmap_remove_tte(pm, tp, start)) break; } } } } int pmap_protect_tte(struct pmap *pm, struct tte *tp, vm_offset_t va) { vm_page_t m; u_long data; data = tp->tte_data; if ((data & TD_PV) != 0) { m = PHYS_TO_VM_PAGE(TD_GET_PA(data)); if ((data & TD_REF) != 0) { vm_page_flag_set(m, PG_REFERENCED); data &= ~TD_REF; } if ((data & TD_W) != 0 && pmap_track_modified(pm, va)) { vm_page_dirty(m); } } data &= ~(TD_W | TD_SW); CTR2(KTR_PMAP, "pmap_protect: new=%#lx old=%#lx", data, tp->tte_data); if (data != tp->tte_data) { CTR0(KTR_PMAP, "pmap_protect: demap"); tlb_page_demap(TLB_DTLB | TLB_ITLB, pm->pm_context, va); tp->tte_data = data; } return (0); } /* * Set the physical protection on the specified range of this map as requested. */ void pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { struct tte *tp; CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx", pm->pm_context, sva, eva, prot); KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, ("pmap_protect: non current pmap")); if ((prot & VM_PROT_READ) == VM_PROT_NONE) { pmap_remove(pm, sva, eva); return; } if (prot & VM_PROT_WRITE) return; if (eva - sva > PMAP_TSB_THRESH) tsb_foreach(pm, sva, eva, pmap_protect_tte); else { for (; sva < eva; sva += PAGE_SIZE) { if ((tp = tsb_tte_lookup(pm, sva)) != NULL) pmap_protect_tte(pm, tp, sva); } } } /* * Map the given physical page at the specified virtual address in the * target pmap with the protection requested. If specified the page * will be wired down. 
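 *
 * An illustrative call ("pm", "va" and "m" are hypothetical):
 *
 *	pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_WRITE, FALSE);
 *
 * which either updates the existing tte in place or enters a new one
 * via tsb_tte_enter().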
/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{
	struct tte otte;
	struct tte tte;
	struct tte *tp;
	vm_offset_t pa;
	vm_page_t om;

	pa = VM_PAGE_TO_PHYS(m);
	CTR6(KTR_PMAP,
	    "pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
	    pm->pm_context, m, va, pa, prot, wired);

	tte.tte_tag = TT_CTX(pm->pm_context) | TT_VA(va);
	tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) | TD_CP;

	/*
	 * If there is an existing mapping, and the physical address has not
	 * changed, this must be a protection or wiring change.
	 */
	if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
		otte = *tp;
		om = PHYS_TO_VM_PAGE(TD_GET_PA(otte.tte_data));
		if (TD_GET_PA(otte.tte_data) == pa) {
			CTR0(KTR_PMAP, "pmap_enter: update");

			/*
			 * Wiring change, just update stats.
			 */
			if (wired) {
				if ((otte.tte_data & TD_WIRED) == 0)
					pm->pm_stats.wired_count++;
			} else {
				if ((otte.tte_data & TD_WIRED) != 0)
					pm->pm_stats.wired_count--;
			}

			if ((otte.tte_data & TD_CV) != 0)
				tte.tte_data |= TD_CV;
			if ((otte.tte_data & TD_REF) != 0)
				tte.tte_data |= TD_REF;
			if ((otte.tte_data & TD_PV) != 0) {
				KASSERT((m->flags &
				    (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
				    ("pmap_enter: unmanaged pv page"));
				tte.tte_data |= TD_PV;
			}

			/*
			 * If we're turning off write protection, sense modify
			 * status and remove the old mapping.
			 */
			if ((prot & VM_PROT_WRITE) == 0 &&
			    (otte.tte_data & (TD_W | TD_SW)) != 0) {
				if ((otte.tte_data & TD_PV) != 0) {
					if (pmap_track_modified(pm, va))
						vm_page_dirty(m);
				}
				tlb_page_demap(TLB_DTLB | TLB_ITLB,
				    TT_GET_CTX(otte.tte_tag), va);
			}
		} else {
			CTR0(KTR_PMAP, "pmap_enter: replace");

			/*
			 * Mapping has changed, invalidate old range.
			 */
			if (!wired && (otte.tte_data & TD_WIRED) != 0)
				pm->pm_stats.wired_count--;

			/*
			 * Enter on the pv list if part of our managed memory.
			 */
			if ((otte.tte_data & TD_PV) != 0) {
				KASSERT((m->flags &
				    (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
				    ("pmap_enter: unmanaged pv page"));
				if ((otte.tte_data & TD_REF) != 0)
					vm_page_flag_set(om, PG_REFERENCED);
				if ((otte.tte_data & TD_W) != 0 &&
				    pmap_track_modified(pm, va))
					vm_page_dirty(om);
				pv_remove(pm, om, va);
				pv_insert(pm, m, va);
				tte.tte_data |= TD_PV;
				pmap_cache_remove(om, va);
				if (pmap_cache_enter(m, va) != 0)
					tte.tte_data |= TD_CV;
			}
			tlb_page_demap(TLB_DTLB | TLB_ITLB,
			    TT_GET_CTX(otte.tte_tag), va);
		}
	} else {
		CTR0(KTR_PMAP, "pmap_enter: new");

		/*
		 * Enter on the pv list if part of our managed memory.
		 */
		if (pmap_initialized &&
		    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
			pv_insert(pm, m, va);
			tte.tte_data |= TD_PV;
			if (pmap_cache_enter(m, va) != 0)
				tte.tte_data |= TD_CV;
		}

		/*
		 * Increment counters.
		 */
		if (wired)
			pm->pm_stats.wired_count++;
	}

	/*
	 * Now validate mapping with desired protection/wiring.
	 */
	if (wired) {
		tte.tte_data |= TD_REF | TD_WIRED;
		if ((prot & VM_PROT_WRITE) != 0)
			tte.tte_data |= TD_W;
	}
	if (pm->pm_context == TLB_CTX_KERNEL)
		tte.tte_data |= TD_P;
	if (prot & VM_PROT_WRITE)
		tte.tte_data |= TD_SW;
	if (prot & VM_PROT_EXECUTE) {
		tte.tte_data |= TD_EXEC;
		icache_inval_phys(pa, pa + PAGE_SIZE - 1);
	}

	if (tp != NULL)
		*tp = tte;
	else
		tsb_tte_enter(pm, m, va, tte);
}

void
pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size, int limit)
{

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_object_init_pt: non current pmap"));
	/* XXX */
}

void
pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
{

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_prefault: non current pmap"));
	/* XXX */
}
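/*
 * Illustrative sketch only (not compiled in): how pmap_enter() above ends
 * up composing a tte for a wired, writable kernel mapping, with the bits
 * collected in one place.  "example_kernel_tte" is a hypothetical helper.
 */
#if 0
static struct tte
example_kernel_tte(vm_offset_t va, vm_offset_t pa)
{
	struct tte tte;

	tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
	tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
	    TD_CP |			/* cacheable */
	    TD_P |			/* privileged: kernel context */
	    TD_REF | TD_WIRED |		/* wired mappings start referenced */
	    TD_SW | TD_W;		/* writable, write-enabled at once */
	return (tte);
}
#endif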
/*
 * Change the wiring attribute for a map/virtual-address pair.
 * The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct tte *tp;

	if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
		if (wired) {
			if ((tp->tte_data & TD_WIRED) == 0)
				pm->pm_stats.wired_count++;
			tp->tte_data |= TD_WIRED;
		} else {
			if ((tp->tte_data & TD_WIRED) != 0)
				pm->pm_stats.wired_count--;
			tp->tte_data &= ~TD_WIRED;
		}
	}
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{

	/* XXX */
}

/*
 * Zero a page of physical memory.  The page is accessed through a
 * physical address ASI, so no temporary mapping is needed.
 */
void
pmap_zero_page(vm_offset_t pa)
{

	CTR1(KTR_PMAP, "pmap_zero_page: pa=%#lx", pa);
	dcache_inval_phys(pa, pa + PAGE_SIZE);
	aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
}

void
pmap_zero_page_area(vm_offset_t pa, int off, int size)
{

	CTR3(KTR_PMAP, "pmap_zero_page_area: pa=%#lx off=%#x size=%#x",
	    pa, off, size);
	KASSERT(off + size <= PAGE_SIZE,
	    ("pmap_zero_page_area: bad off/size"));
	dcache_inval_phys(pa + off, pa + off + size);
	aszero(ASI_PHYS_USE_EC, pa + off, size);
}

/*
 * Copy a page of physical memory, likewise through physical address ASIs.
 */
void
pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{

	CTR2(KTR_PMAP, "pmap_copy_page: src=%#lx dst=%#lx", src, dst);
	dcache_inval_phys(dst, dst + PAGE_SIZE);
	ascopy(ASI_PHYS_USE_EC, src, dst, PAGE_SIZE);
}

/*
 * Make the specified page pageable (or not).  Unneeded.
 */
void
pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
    boolean_t pageable)
{
}

/*
 * Return true if the physical page resides in the given pmap.
 */
boolean_t
pmap_page_exists(pmap_t pm, vm_page_t m)
{

	if (m->flags & PG_FICTITIOUS)
		return (FALSE);
	return (pv_page_exists(pm, m));
}

/*
 * Remove all pages from the specified address space; this aids process
 * exit speed.  This is much faster than pmap_remove in the case of running
 * down an entire address space.  Only works for the current pmap.
 */
void
pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct tte *tp;
	pv_entry_t npv;
	pv_entry_t pv;
	vm_page_t m;

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_remove_pages: non current pmap"));
	npv = NULL;
	for (pv = TAILQ_FIRST(&pm->pm_pvlist); pv != NULL; pv = npv) {
		npv = TAILQ_NEXT(pv, pv_plist);
		if (pv->pv_va >= eva || pv->pv_va < sva)
			continue;
		if ((tp = tsb_tte_lookup(pv->pv_pmap, pv->pv_va)) == NULL)
			continue;

		/*
		 * We cannot remove wired pages at this time.
		 */
		if ((tp->tte_data & TD_WIRED) != 0)
			continue;

		atomic_clear_long(&tp->tte_data, TD_V);
		tp->tte_tag = 0;
		tp->tte_data = 0;

		m = pv->pv_m;

		pv->pv_pmap->pm_stats.resident_count--;
		m->md.pv_list_count--;
		pmap_cache_remove(m, pv->pv_va);
		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		if (TAILQ_EMPTY(&m->md.pv_list))
			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
		pv_free(pv);
	}
	tlb_context_primary_demap(TLB_DTLB | TLB_ITLB);
}
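/*
 * Illustrative sketch only (not compiled in): the ordering used by
 * pmap_zero_page() and pmap_copy_page() above.  The destination's data
 * cache lines are invalidated before the physically addressed store,
 * since ASI_PHYS_USE_EC bypasses the virtually indexed data cache and
 * would otherwise leave stale lines behind.  "example_phys_fill" is a
 * hypothetical helper.
 */
#if 0
static void
example_phys_fill(vm_offset_t pa, vm_offset_t len)
{

	dcache_inval_phys(pa, pa + len);
	aszero(ASI_PHYS_USE_EC, pa, len);
}
#endif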
/*
 * Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{

	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
			pv_bit_clear(m, TD_W | TD_SW);
		else
			pv_remove_all(m);
	}
}

vm_offset_t
pmap_phys_address(int ppn)
{

	return (sparc64_ptob(ppn));
}

int
pmap_ts_referenced(vm_page_t m)
{

	if (m->flags & PG_FICTITIOUS)
		return (0);
	return (pv_bit_count(m, TD_REF));
}

boolean_t
pmap_is_modified(vm_page_t m)
{

	if (m->flags & PG_FICTITIOUS)
		return (FALSE);
	return (pv_bit_test(m, TD_W));
}

void
pmap_clear_modify(vm_page_t m)
{

	if (m->flags & PG_FICTITIOUS)
		return;
	pv_bit_clear(m, TD_W);
}

void
pmap_clear_reference(vm_page_t m)
{

	if (m->flags & PG_FICTITIOUS)
		return;
	pv_bit_clear(m, TD_REF);
}

int
pmap_mincore(pmap_t pm, vm_offset_t addr)
{

	TODO;
	return (0);
}

/*
 * Activate a user pmap.  The pmap must be activated before its address space
 * can be accessed in any way.
 */
void
pmap_activate(struct thread *td)
{
	vm_offset_t tsb;
	u_long context;
	u_long data;
	pmap_t pm;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;
	context = pm->pm_context;
	data = pm->pm_tsb_tte->tte_data;
	tsb = (vm_offset_t)pm->pm_tsb;

	KASSERT(context != 0, ("pmap_activate: activating nucleus context"));
	KASSERT(context != -1, ("pmap_activate: steal context"));
	KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?"));
	pm->pm_active |= PCPU_GET(cpumask);

	wrpr(pstate, 0, PSTATE_MMU);
	__asm __volatile("mov %0, %%g7" : : "r" (tsb));
	wrpr(pstate, 0, PSTATE_NORMAL);

	stxa(TLB_DEMAP_VA(tsb) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE,
	    ASI_DMMU_DEMAP, 0);
	membar(Sync);

	stxa(AA_DMMU_TAR, ASI_DMMU, tsb);
	stxa(0, ASI_DTLB_DATA_IN_REG, data | TD_L);
	membar(Sync);

	stxa(AA_DMMU_PCXR, ASI_DMMU, context);
	membar(Sync);

	wrpr(pstate, 0, PSTATE_KERNEL);
}

vm_offset_t
pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
{

	return (va);
}

/*
 * Allocate a hardware context number from the context map.
 */
static u_int
pmap_context_alloc(void)
{
	u_int i;

	i = pmap_context_base;
	do {
		if (pmap_context_map[i] == 0) {
			pmap_context_map[i] = 1;
			pmap_context_base = (i + 1) & (PMAP_CONTEXT_MAX - 1);
			return (i);
		}
	} while ((i = (i + 1) & (PMAP_CONTEXT_MAX - 1)) != pmap_context_base);
	panic("pmap_context_alloc");
}

/*
 * Free a hardware context number back to the context map.
 */
static void
pmap_context_destroy(u_int i)
{

	pmap_context_map[i] = 0;
}
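/*
 * Illustrative sketch only (not compiled in): the round-robin behaviour of
 * the context allocator above.  Starting each search at pmap_context_base,
 * one past the previous allocation, spreads context numbers across the map
 * instead of immediately reusing the one just freed.  "example_context_cycle"
 * is a hypothetical helper.
 */
#if 0
static void
example_context_cycle(void)
{
	u_int c1, c2;

	c1 = pmap_context_alloc();
	pmap_context_destroy(c1);
	c2 = pmap_context_alloc();
	/*
	 * The search resumed at pmap_context_base, so c2 is the next free
	 * context after c1, not c1 itself, unless the search wrapped all
	 * the way around the map.
	 */
	pmap_context_destroy(c2);
}
#endif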