Index: head/sys/riscv/include/vmparam.h
===================================================================
--- head/sys/riscv/include/vmparam.h	(revision 361586)
+++ head/sys/riscv/include/vmparam.h	(revision 361587)
@@ -1,241 +1,239 @@
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vmparam.h	5.9 (Berkeley) 5/12/91
 *	from: FreeBSD: src/sys/i386/include/vmparam.h,v 1.33 2000/03/30
 * $FreeBSD$
 */

#ifndef _MACHINE_VMPARAM_H_
#define _MACHINE_VMPARAM_H_

/*
 * Virtual memory related constants, all in bytes
 */
#ifndef MAXTSIZ
#define	MAXTSIZ		(1*1024*1024*1024)	/* max text size */
#endif
#ifndef DFLDSIZ
#define	DFLDSIZ		(128*1024*1024)		/* initial data size limit */
#endif
#ifndef MAXDSIZ
#define	MAXDSIZ		(1*1024*1024*1024)	/* max data size */
#endif
#ifndef DFLSSIZ
#define	DFLSSIZ		(128*1024*1024)		/* initial stack size limit */
#endif
#ifndef MAXSSIZ
#define	MAXSSIZ		(1*1024*1024*1024)	/* max stack size */
#endif
#ifndef SGROWSIZ
#define	SGROWSIZ	(128*1024)		/* amount to grow stack */
#endif

/*
 * The physical address space is sparsely populated.
 */
#define	VM_PHYSSEG_SPARSE

/*
 * The number of PHYSSEG entries.
 */
#define	VM_PHYSSEG_MAX		64

/*
 * Create two free page pools: VM_FREEPOOL_DEFAULT is the default pool
 * from which physical pages are allocated and VM_FREEPOOL_DIRECT is
 * the pool from which physical pages for small UMA objects are
 * allocated.
 */
#define	VM_NFREEPOOL		2
#define	VM_FREEPOOL_DEFAULT	0
#define	VM_FREEPOOL_DIRECT	1

/*
 * Create one free page list: VM_FREELIST_DEFAULT is for all physical
 * pages.
 */
#define	VM_NFREELIST		1
#define	VM_FREELIST_DEFAULT	0

/*
 * An allocation size of 16MB is supported in order to optimize the
 * use of the direct map by UMA.  Specifically, a cache line contains
 * at most eight L2 page table entries, collectively mapping 16MB of
 * physical memory.  By reducing the number of distinct 16MB "pages"
 * that are used by UMA, the physical memory allocator reduces the
 * likelihood of both 2MB page TLB misses and cache misses caused by
 * 2MB page TLB misses.
 */
#define	VM_NFREEORDER		12

/*
 * Enable superpage reservations: 1 level.
 */
#ifndef	VM_NRESERVLEVEL
#define	VM_NRESERVLEVEL		1
#endif

/*
 * Level 0 reservations consist of 512 pages.
 */
#ifndef	VM_LEVEL_0_ORDER
#define	VM_LEVEL_0_ORDER	9
#endif

/**
 * Address space layout.
 *
 * RISC-V implements multiple paging modes with different virtual address space
 * sizes: SV32, SV39 and SV48.  SV39 permits a virtual address space size of
 * 512GB and uses a three-level page table.  Since this is large enough for
 * most purposes, we currently use SV39 for both userland and the kernel,
 * avoiding the extra translation step required by SV48.
 *
 * The address space is split into two regions at each end of the 64-bit
 * address space:
 *
 *	0x0000000000000000 - 0x0000003fffffffff	   256GB user map
 *	0x0000004000000000 - 0xffffffbfffffffff	   unmappable
 *	0xffffffc000000000 - 0xffffffc7ffffffff	   32GB kernel map
 *	0xffffffc800000000 - 0xffffffcfffffffff	   32GB unused
 *	0xffffffd000000000 - 0xffffffefffffffff	   128GB direct map
 *	0xfffffff000000000 - 0xffffffffffffffff	   64GB unused
 *
 * The kernel is loaded at the beginning of the kernel map.
 *
 * We define some interesting address constants:
 *
 * VM_MIN_ADDRESS and VM_MAX_ADDRESS define the start and end of the entire
 * 64 bit address space, mostly just for convenience.
 *
 * VM_MIN_KERNEL_ADDRESS and VM_MAX_KERNEL_ADDRESS define the start and end of
 * mappable kernel virtual address space.
 *
 * VM_MIN_USER_ADDRESS and VM_MAX_USER_ADDRESS define the start and end of the
 * user address space.
 */
#define	VM_MIN_ADDRESS		(0x0000000000000000UL)
#define	VM_MAX_ADDRESS		(0xffffffffffffffffUL)

#define	VM_MIN_KERNEL_ADDRESS	(0xffffffc000000000UL)
#define	VM_MAX_KERNEL_ADDRESS	(0xffffffc800000000UL)

#define	DMAP_MIN_ADDRESS	(0xffffffd000000000UL)
#define	DMAP_MAX_ADDRESS	(0xfffffff000000000UL)

#define	DMAP_MIN_PHYSADDR	(dmap_phys_base)
#define	DMAP_MAX_PHYSADDR	(dmap_phys_max)

/* True if pa is in the dmap range */
#define	PHYS_IN_DMAP(pa)	((pa) >= DMAP_MIN_PHYSADDR && \
    (pa) < DMAP_MAX_PHYSADDR)
/* True if va is in the dmap range */
#define	VIRT_IN_DMAP(va)	((va) >= DMAP_MIN_ADDRESS && \
    (va) < (dmap_max_addr))

#define	PMAP_HAS_DMAP	1
#define	PHYS_TO_DMAP(pa)						\
({									\
	KASSERT(PHYS_IN_DMAP(pa),					\
	    ("%s: PA out of range, PA: 0x%lx", __func__,		\
	    (vm_paddr_t)(pa)));						\
	((pa) - dmap_phys_base) + DMAP_MIN_ADDRESS;			\
})

#define	DMAP_TO_PHYS(va)						\
({									\
	KASSERT(VIRT_IN_DMAP(va),					\
	    ("%s: VA out of range, VA: 0x%lx", __func__,		\
	    (vm_offset_t)(va)));					\
	((va) - DMAP_MIN_ADDRESS) + dmap_phys_base;			\
})
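/*
 * Editorial illustration, not part of the commit: PHYS_TO_DMAP() and
 * DMAP_TO_PHYS() are plain offset translations anchored at dmap_phys_base,
 * so they compose to the identity for any address inside the direct map.
 * The helper name below is hypothetical.
 */
static inline int
dmap_round_trip_ok(vm_paddr_t pa)
{
	/* PHYS_TO_DMAP() rebases the PA into [DMAP_MIN_ADDRESS, ...). */
	vm_offset_t va = PHYS_TO_DMAP(pa);

	/* DMAP_TO_PHYS() applies the inverse offset. */
	return (DMAP_TO_PHYS(va) == pa && VIRT_IN_DMAP(va));
}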
#define	VM_MIN_USER_ADDRESS	(0x0000000000000000UL)
#define	VM_MAX_USER_ADDRESS	(0x0000004000000000UL)

#define	VM_MINUSER_ADDRESS	(VM_MIN_USER_ADDRESS)
#define	VM_MAXUSER_ADDRESS	(VM_MAX_USER_ADDRESS)

#define	KERNBASE		(VM_MIN_KERNEL_ADDRESS)
#define	SHAREDPAGE		(VM_MAXUSER_ADDRESS - PAGE_SIZE)
#define	USRSTACK		SHAREDPAGE

-#define	KERNENTRY		(0)
-
#define	VM_EARLY_DTB_ADDRESS	(VM_MAX_KERNEL_ADDRESS - (2 * L2_SIZE))

/*
 * How many physical pages per kmem arena virtual page.
 */
#ifndef VM_KMEM_SIZE_SCALE
#define	VM_KMEM_SIZE_SCALE	(3)
#endif

/*
 * Optional floor (in bytes) on the size of the kmem arena.
 */
#ifndef VM_KMEM_SIZE_MIN
#define	VM_KMEM_SIZE_MIN	(16 * 1024 * 1024)
#endif

/*
 * Optional ceiling (in bytes) on the size of the kmem arena: 60% of the
 * kernel map.
 */
#ifndef VM_KMEM_SIZE_MAX
#define	VM_KMEM_SIZE_MAX	((VM_MAX_KERNEL_ADDRESS - \
    VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
#endif
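/*
 * Worked example (editorial, not part of the commit): the kernel map
 * defined above spans 32GB (0xffffffc000000000 - 0xffffffc800000000), so
 * the default ceiling evaluates to (2^35 + 1) * 3 / 5 = 0x4cccccccd bytes,
 * i.e. roughly 19.2GB, which is 60% of the 32GB map.
 */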
/*
 * Initial pagein size of beginning of executable file.
 */
#ifndef	VM_INITIAL_PAGEIN
#define	VM_INITIAL_PAGEIN	16
#endif

#define	UMA_MD_SMALL_ALLOC

#ifndef LOCORE
extern vm_paddr_t dmap_phys_base;
extern vm_paddr_t dmap_phys_max;
extern vm_offset_t dmap_max_addr;
extern vm_offset_t vm_max_kernel_address;
extern vm_offset_t init_pt_va;
#endif

#define	ZERO_REGION_SIZE	(64 * 1024)	/* 64KB */

#define	DEVMAP_MAX_VADDR	VM_MAX_KERNEL_ADDRESS

#endif /* !_MACHINE_VMPARAM_H_ */
Index: head/sys/riscv/riscv/machdep.c
===================================================================
--- head/sys/riscv/riscv/machdep.c	(revision 361586)
+++ head/sys/riscv/riscv/machdep.c	(revision 361587)
@@ -1,909 +1,917 @@
/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2015-2017 Ruslan Bukin
 * All rights reserved.
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by the University of Cambridge
 * Computer Laboratory as part of the CTSRD Project, with support from the
 * UK Higher Education Innovation Fund (HEIF).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* (remaining system, vm, and machine header names elided) */

#ifdef FPE
/* (FPE header name elided) */
#endif

#ifdef FDT
/* (FDT header names elided) */
#endif

static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static void set_fpcontext(struct thread *td, mcontext_t *mcp);

struct pcpu __pcpu[MAXCPU];

static struct trapframe proc0_tf;

int early_boot = 1;
int cold = 1;

#define	DTB_SIZE_MAX	(1024 * 1024)

vm_paddr_t physmap[PHYS_AVAIL_ENTRIES];
u_int physmap_idx;

struct kva_md_info kmi;

int64_t dcache_line_size;	/* The minimum D cache line size */
int64_t icache_line_size;	/* The minimum I cache line size */
int64_t idcache_line_size;	/* The minimum cache line size */

uint32_t boot_hart;	/* The hart we booted on. */
cpuset_t all_harts;

extern int *end;

static void
cpu_startup(void *dummy)
{

	sbi_print_version();
	identify_cpu();

	printf("real memory  = %ju (%ju MB)\n", ptoa((uintmax_t)realmem),
	    ptoa((uintmax_t)realmem) / (1024 * 1024));

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_free_count()),
	    ptoa((uintmax_t)vm_free_count()) / (1024 * 1024));
	if (bootverbose)
		devmap_print_table();

	bufinit();
	vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	regs->sepc = frame->tf_sepc;
	regs->sstatus = frame->tf_sstatus;
	regs->ra = frame->tf_ra;
	regs->sp = frame->tf_sp;
	regs->gp = frame->tf_gp;
	regs->tp = frame->tf_tp;

	memcpy(regs->t, frame->tf_t, sizeof(regs->t));
	memcpy(regs->s, frame->tf_s, sizeof(regs->s));
	memcpy(regs->a, frame->tf_a, sizeof(regs->a));

	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	frame->tf_sepc = regs->sepc;
	frame->tf_ra = regs->ra;
	frame->tf_sp = regs->sp;
	frame->tf_gp = regs->gp;
	frame->tf_tp = regs->tp;

	memcpy(frame->tf_t, regs->t, sizeof(frame->tf_t));
	memcpy(frame->tf_s, regs->s, sizeof(frame->tf_s));
	memcpy(frame->tf_a, regs->a, sizeof(frame->tf_a));

	return (0);
}
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef FPE
	struct pcb *pcb;

	pcb = td->td_pcb;

	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running FPE instructions we will
		 * need to save the state to memcpy it below.
		 */
		if (td == curthread)
			fpe_state_save(td);

		memcpy(regs->fp_x, pcb->pcb_x, sizeof(regs->fp_x));
		regs->fp_fcsr = pcb->pcb_fcsr;
	} else
#endif
		memset(regs, 0, sizeof(*regs));

	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef FPE
	struct trapframe *frame;
	struct pcb *pcb;

	frame = td->td_frame;
	pcb = td->td_pcb;

	memcpy(pcb->pcb_x, regs->fp_x, sizeof(regs->fp_x));
	pcb->pcb_fcsr = regs->fp_fcsr;
	pcb->pcb_fpflags |= PCB_FP_STARTED;

	frame->tf_sstatus &= ~SSTATUS_FS_MASK;
	frame->tf_sstatus |= SSTATUS_FS_CLEAN;
#endif

	return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

	panic("fill_dbregs");
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

	panic("set_dbregs");
}

int
ptrace_set_pc(struct thread *td, u_long addr)
{

	td->td_frame->tf_sepc = addr;
	return (0);
}

int
ptrace_single_step(struct thread *td)
{

	/* TODO; */
	return (EOPNOTSUPP);
}

int
ptrace_clear_single_step(struct thread *td)
{

	/* TODO; */
	return (EOPNOTSUPP);
}

void
exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
{
	struct trapframe *tf;
	struct pcb *pcb;

	tf = td->td_frame;
	pcb = td->td_pcb;

	memset(tf, 0, sizeof(struct trapframe));

	tf->tf_a[0] = stack;
	tf->tf_sp = STACKALIGN(stack);
	tf->tf_ra = imgp->entry_addr;
	tf->tf_sepc = imgp->entry_addr;

	pcb->pcb_fpflags &= ~PCB_FP_STARTED;
}

/* Sanity check these are the same size, they will be memcpy'd to and fro */
CTASSERT(sizeof(((struct trapframe *)0)->tf_a) ==
    sizeof((struct gpregs *)0)->gp_a);
CTASSERT(sizeof(((struct trapframe *)0)->tf_s) ==
    sizeof((struct gpregs *)0)->gp_s);
CTASSERT(sizeof(((struct trapframe *)0)->tf_t) ==
    sizeof((struct gpregs *)0)->gp_t);
CTASSERT(sizeof(((struct trapframe *)0)->tf_a) ==
    sizeof((struct reg *)0)->a);
CTASSERT(sizeof(((struct trapframe *)0)->tf_s) ==
    sizeof((struct reg *)0)->s);
CTASSERT(sizeof(((struct trapframe *)0)->tf_t) ==
    sizeof((struct reg *)0)->t);

/* Support for FDT configurations only. */
CTASSERT(FDT);

int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;

	memcpy(mcp->mc_gpregs.gp_t, tf->tf_t, sizeof(mcp->mc_gpregs.gp_t));
	memcpy(mcp->mc_gpregs.gp_s, tf->tf_s, sizeof(mcp->mc_gpregs.gp_s));
	memcpy(mcp->mc_gpregs.gp_a, tf->tf_a, sizeof(mcp->mc_gpregs.gp_a));

	if (clear_ret & GET_MC_CLEAR_RET) {
		mcp->mc_gpregs.gp_a[0] = 0;
		mcp->mc_gpregs.gp_t[0] = 0; /* clear syscall error */
	}

	mcp->mc_gpregs.gp_ra = tf->tf_ra;
	mcp->mc_gpregs.gp_sp = tf->tf_sp;
	mcp->mc_gpregs.gp_gp = tf->tf_gp;
	mcp->mc_gpregs.gp_tp = tf->tf_tp;
	mcp->mc_gpregs.gp_sepc = tf->tf_sepc;
	mcp->mc_gpregs.gp_sstatus = tf->tf_sstatus;

	get_fpcontext(td, mcp);

	return (0);
}

int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tf;

	tf = td->td_frame;

	/*
	 * Permit changes to the USTATUS bits of SSTATUS.
	 *
	 * Ignore writes to read-only bits (SD, XS).
	 *
	 * Ignore writes to the FS field as set_fpcontext() will set
	 * it explicitly.
	 */
	if (((mcp->mc_gpregs.gp_sstatus ^ tf->tf_sstatus) &
	    ~(SSTATUS_SD | SSTATUS_XS_MASK | SSTATUS_FS_MASK | SSTATUS_UPIE |
	    SSTATUS_UIE)) != 0)
		return (EINVAL);

	memcpy(tf->tf_t, mcp->mc_gpregs.gp_t, sizeof(tf->tf_t));
	memcpy(tf->tf_s, mcp->mc_gpregs.gp_s, sizeof(tf->tf_s));
	memcpy(tf->tf_a, mcp->mc_gpregs.gp_a, sizeof(tf->tf_a));

	tf->tf_ra = mcp->mc_gpregs.gp_ra;
	tf->tf_sp = mcp->mc_gpregs.gp_sp;
	tf->tf_gp = mcp->mc_gpregs.gp_gp;
	tf->tf_sepc = mcp->mc_gpregs.gp_sepc;
	tf->tf_sstatus = mcp->mc_gpregs.gp_sstatus;

	set_fpcontext(td, mcp);

	return (0);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef FPE
	struct pcb *curpcb;

	critical_enter();

	curpcb = curthread->td_pcb;

	KASSERT(td->td_pcb == curpcb, ("Invalid fpe pcb"));

	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running FPE instructions we will
		 * need to save the state to memcpy it below.
		 */
		fpe_state_save(td);

		KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
		    ("Non-userspace FPE flags set in get_fpcontext"));
		memcpy(mcp->mc_fpregs.fp_x, curpcb->pcb_x,
		    sizeof(mcp->mc_fpregs));
		mcp->mc_fpregs.fp_fcsr = curpcb->pcb_fcsr;
		mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
		mcp->mc_flags |= _MC_FP_VALID;
	}

	critical_exit();
#endif
}

static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef FPE
	struct pcb *curpcb;
#endif

	td->td_frame->tf_sstatus &= ~SSTATUS_FS_MASK;
	td->td_frame->tf_sstatus |= SSTATUS_FS_OFF;

#ifdef FPE
	critical_enter();

	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
		curpcb = curthread->td_pcb;
		/* FPE usage is enabled, override registers. */
		memcpy(curpcb->pcb_x, mcp->mc_fpregs.fp_x,
		    sizeof(mcp->mc_fpregs));
		curpcb->pcb_fcsr = mcp->mc_fpregs.fp_fcsr;
		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
		td->td_frame->tf_sstatus |= SSTATUS_FS_CLEAN;
	}

	critical_exit();
#endif
}

void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		__asm __volatile(
		    "fence \n"
		    "wfi   \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}

void
cpu_halt(void)
{

	/*
	 * Try to power down using the HSM SBI extension and fall back to a
	 * simple wfi loop.
	 */
	intr_disable();
	if (sbi_probe_extension(SBI_EXT_ID_HSM) != 0)
		sbi_hsm_hart_stop();
	for (;;)
		__asm __volatile("wfi");
	/* NOTREACHED */
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	panic("cpu_est_clockrate");
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

void
spinlock_enter(void)
{
	struct thread *td;
	register_t reg;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		reg = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_sstatus_ie = reg;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t sstatus_ie;

	td = curthread;
	sstatus_ie = td->td_md.md_saved_sstatus_ie;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();
		intr_restore(sstatus_ie);
	}
}
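/*
 * Editorial illustration, not part of the commit: spinlock_enter() and
 * spinlock_exit() nest, and only the outermost pair touches the interrupt
 * state saved in md_saved_sstatus_ie.  The function name is hypothetical.
 */
static void
nested_spinlock_example(void)
{
	spinlock_enter();	/* outermost: disables interrupts, saves state */
	spinlock_enter();	/* nested: only bumps md_spinlock_count */
	spinlock_exit();	/* nested: only drops md_spinlock_count */
	spinlock_exit();	/* outermost: restores saved interrupt state */
}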
#ifndef	_SYS_SYSPROTO_H_
struct sigreturn_args {
	ucontext_t *sigcntxp;
};
#endif

int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	int error;

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	memcpy(pcb->pcb_s, tf->tf_s, sizeof(tf->tf_s));

	pcb->pcb_ra = tf->tf_sepc;
	pcb->pcb_sp = tf->tf_sp;
	pcb->pcb_gp = tf->tf_gp;
	pcb->pcb_tp = tf->tf_tp;
}
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe *fp, frame;
	struct sysentvec *sysent;
	struct trapframe *tf;
	struct sigacts *psp;
	struct thread *td;
	struct proc *p;
	int onstack;
	int sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
	} else {
		fp = (struct sigframe *)td->td_frame->tf_sp;
	}

	/* Make room, keeping the stack aligned */
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Fill in the frame to copy out */
	bzero(&frame, sizeof(frame));
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ?
	    (onstack ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	tf->tf_a[0] = sig;
	tf->tf_a[1] = (register_t)&fp->sf_si;
	tf->tf_a[2] = (register_t)&fp->sf_uc;

	tf->tf_sepc = (register_t)catcher;
	tf->tf_sp = (register_t)fp;

	sysent = p->p_sysent;
	if (sysent->sv_sigcode_base != 0)
		tf->tf_ra = (register_t)sysent->sv_sigcode_base;
	else
		tf->tf_ra = (register_t)(sysent->sv_psstrings -
		    *(sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_sepc,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup;

	pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_kstack_pages = KSTACK_PAGES;
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}

#ifdef FDT
static void
try_load_dtb(caddr_t kmdp)
{
	vm_offset_t dtbp;

	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);

#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (dtbp == (vm_offset_t)NULL) {
		printf("ERROR loading DTB\n");
		return;
	}

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");
}
#endif

static void
cache_setup(void)
{
	/* TODO */

	dcache_line_size = 0;
	icache_line_size = 0;
	idcache_line_size = 0;
}
/*
 * Fake up a boot descriptor table.
 * RISCVTODO: This needs to be done via loader (when it's available).
 */
vm_offset_t
fake_preload_metadata(struct riscv_bootparams *rvbp)
{
	static uint32_t fake_preload[35];
#ifdef DDB
	vm_offset_t zstart = 0, zend = 0;
#endif
	vm_offset_t lastaddr;
-	size_t dtb_size;
-	int i;
+	size_t fake_size, dtb_size;

-	i = 0;
+#define PRELOAD_PUSH_VALUE(type, value) do {			\
+	*(type *)((char *)fake_preload + fake_size) = (value);	\
+	fake_size += sizeof(type);				\
+} while (0)

-	fake_preload[i++] = MODINFO_NAME;
-	fake_preload[i++] = strlen("kernel") + 1;
-	strcpy((char*)&fake_preload[i++], "kernel");
-	i += 1;
-	fake_preload[i++] = MODINFO_TYPE;
-	fake_preload[i++] = strlen("elf64 kernel") + 1;
-	strcpy((char*)&fake_preload[i++], "elf64 kernel");
-	i += 3;
-	fake_preload[i++] = MODINFO_ADDR;
-	fake_preload[i++] = sizeof(vm_offset_t);
-	*(vm_offset_t *)&fake_preload[i++] =
-	    (vm_offset_t)(KERNBASE + KERNENTRY);
-	i += 1;
-	fake_preload[i++] = MODINFO_SIZE;
-	fake_preload[i++] = sizeof(vm_offset_t);
-	fake_preload[i++] = (vm_offset_t)&end -
-	    (vm_offset_t)(KERNBASE + KERNENTRY);
-	i += 1;
+#define PRELOAD_PUSH_STRING(str) do {				\
+	uint32_t ssize;						\
+	ssize = strlen(str) + 1;				\
+	PRELOAD_PUSH_VALUE(uint32_t, ssize);			\
+	strcpy(((char *)fake_preload + fake_size), str);	\
+	fake_size += ssize;					\
+	fake_size = roundup(fake_size, sizeof(u_long));		\
+} while (0)
+
+	fake_size = 0;
+
+	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_NAME);
+	PRELOAD_PUSH_STRING("kernel");
+	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_TYPE);
+	PRELOAD_PUSH_STRING("elf kernel");
+
+	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_ADDR);
+	PRELOAD_PUSH_VALUE(uint32_t, sizeof(vm_offset_t));
+	PRELOAD_PUSH_VALUE(uint64_t, KERNBASE);
+
+	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_SIZE);
+	PRELOAD_PUSH_VALUE(uint32_t, sizeof(size_t));
+	PRELOAD_PUSH_VALUE(uint64_t, (size_t)((vm_offset_t)&end - KERNBASE));

#ifdef DDB
#if 0	/* RISCVTODO */
	if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) {
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4);
		fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM;
		fake_preload[i++] = sizeof(vm_offset_t);
		fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8);
		lastaddr = *(uint32_t *)(KERNVIRTADDR + 8);
		zend = lastaddr;
		zstart = *(uint32_t *)(KERNVIRTADDR + 4);
		db_fetch_ksymtab(zstart, zend);
	} else
#endif
#endif
		lastaddr = (vm_offset_t)&end;

	/* Copy the DTB to KVA space. */
	lastaddr = roundup(lastaddr, sizeof(int));
-	fake_preload[i++] = MODINFO_METADATA | MODINFOMD_DTBP;
-	fake_preload[i++] = sizeof(vm_offset_t);
-	*(vm_offset_t *)&fake_preload[i] = (vm_offset_t)lastaddr;
-	i += sizeof(vm_offset_t) / sizeof(uint32_t);
+	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_METADATA | MODINFOMD_DTBP);
+	PRELOAD_PUSH_VALUE(uint32_t, sizeof(vm_offset_t));
+	PRELOAD_PUSH_VALUE(vm_offset_t, lastaddr);
	dtb_size = fdt_totalsize(rvbp->dtbp_virt);
	memmove((void *)lastaddr, (const void *)rvbp->dtbp_virt, dtb_size);
	lastaddr = roundup(lastaddr + dtb_size, sizeof(int));

-	fake_preload[i++] = 0;
-	fake_preload[i] = 0;
-	preload_metadata = (void *)fake_preload;
+	/* End marker */
+	PRELOAD_PUSH_VALUE(uint32_t, 0);
+	PRELOAD_PUSH_VALUE(uint32_t, 0);
+	preload_metadata = (caddr_t)fake_preload;

-	KASSERT(i < nitems(fake_preload), ("Too many fake_preload items"));
+	KASSERT(fake_size < sizeof(fake_preload),
+	    ("Too many fake_preload items"));

	return (lastaddr);
}
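/*
 * Editorial illustration, not part of the commit: the records pushed by
 * PRELOAD_PUSH_VALUE()/PRELOAD_PUSH_STRING() follow the standard preload
 * metadata encoding: a 32-bit tag, a 32-bit payload size, then the payload
 * padded to a u_long boundary, terminated by a pair of zero words.  A
 * consumer can walk the stream like this (walk_fake_preload() is a
 * hypothetical helper, not kernel code):
 */
static void
walk_fake_preload(const char *p)
{
	uint32_t type, size;

	for (;;) {
		memcpy(&type, p, sizeof(type));
		memcpy(&size, p + sizeof(type), sizeof(size));
		if (type == 0 && size == 0)
			break;		/* end marker */
		/* Payload starts at p + 8 and is 'size' bytes long. */
		p += 2 * sizeof(uint32_t) + roundup(size, sizeof(u_long));
	}
}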
void
initriscv(struct riscv_bootparams *rvbp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	struct pcpu *pcpup;
	int mem_regions_sz;
	vm_offset_t lastaddr;
	vm_size_t kernlen;
	caddr_t kmdp;

	TSRAW(&thread0, TS_ENTER, __func__, NULL);

	/* Set the pcpu data, this is needed by pmap_bootstrap */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));
	pcpup->pc_hart = boot_hart;

	/* Set the pcpu pointer */
	__asm __volatile("mv tp, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);

	/* Initialize SBI interface. */
	sbi_init();

	/* Set the module data location */
	lastaddr = fake_preload_metadata(rvbp);

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	boothowto = RB_VERBOSE | RB_SINGLE;
	boothowto = RB_VERBOSE;

	kern_envp = NULL;

#ifdef FDT
	try_load_dtb(kmdp);

	/*
	 * Exclude reserved memory specified by the device tree. Typically,
	 * this contains an entry for memory used by the runtime SBI firmware.
	 */
	if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0) {
		physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
	}

	/* Grab physical memory regions information from device tree. */
	if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, NULL) != 0) {
		panic("Cannot get physical memory regions");
	}
	physmem_hardware_regions(mem_regions, mem_regions_sz);
#endif

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();

	/* Bootstrap enough of pmap to enter the kernel proper */
	kernlen = (lastaddr - KERNBASE);
	pmap_bootstrap(rvbp->kern_l1pt, rvbp->kern_phys, kernlen);

#ifdef FDT
	/*
	 * XXX: Exclude the lowest 2MB of physical memory, if it hasn't been
	 * already, as this area is assumed to contain the SBI firmware. This
	 * is a little fragile, but it is consistent with the platforms we
	 * support so far.
	 *
	 * TODO: remove this when all regular booting methods properly report
	 * their reserved memory in the device tree.
	 */
	if (mem_regions[0].mr_start == physmap[0]) {
		physmem_exclude_region(mem_regions[0].mr_start, L2_SIZE,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
	}
#endif
	physmem_init_kernel_globals();

	/* Establish static device mappings */
	devmap_bootstrap(0, NULL);

	cninit();

	init_proc0(rvbp->kern_stack);

	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);
	kdb_init();

	if (boothowto & RB_VERBOSE)
		physmem_print_tables();

	early_boot = 0;

	TSEXIT();
}

#undef bzero
void
bzero(void *buf, size_t len)
{
	uint8_t *p;

	p = buf;
	while (len-- > 0)
		*p++ = 0;
}
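/*
 * Editorial illustration, not part of the diff: the metadata faked up by
 * fake_preload_metadata() is consumed through the regular preload API, as
 * initriscv() and try_load_dtb() already do above.  A condensed sketch
 * (find_dtb_from_metadata() is a hypothetical helper):
 */
static vm_offset_t
find_dtb_from_metadata(void)
{
	caddr_t kmdp;

	/* Locate the "elf kernel" entry pushed with MODINFO_NAME/TYPE. */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		return (0);

	/* Fetch the DTB pointer pushed as MODINFO_METADATA | MODINFOMD_DTBP. */
	return (MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t));
}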