Index: head/sys/riscv/riscv/machdep.c
===================================================================
--- head/sys/riscv/riscv/machdep.c	(revision 363458)
+++ head/sys/riscv/riscv/machdep.c	(revision 363459)
@@ -1,975 +1,975 @@
/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2015-2017 Ruslan Bukin
 * All rights reserved.
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by the University of Cambridge
 * Computer Laboratory as part of the CTSRD Project, with support from the
 * UK Higher Education Innovation Fund (HEIF).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* (roughly fifty sys/, vm/, and machine/ #include directives follow here;
 * the header names were lost in this capture) */

#ifdef FPE
/* (one #include directive; header name lost in capture) */
#endif

#ifdef FDT
/* (three #include directives; header names lost in capture) */
#endif

static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static void set_fpcontext(struct thread *td, mcontext_t *mcp);

struct pcpu __pcpu[MAXCPU];

static struct trapframe proc0_tf;

int early_boot = 1;
int cold = 1;

#define	DTB_SIZE_MAX	(1024 * 1024)

vm_paddr_t physmap[PHYS_AVAIL_ENTRIES];
u_int physmap_idx;

struct kva_md_info kmi;

int64_t dcache_line_size;	/* The minimum D cache line size */
int64_t icache_line_size;	/* The minimum I cache line size */
int64_t idcache_line_size;	/* The minimum cache line size */

#define	BOOT_HART_INVALID	0xffffffff
uint32_t boot_hart = BOOT_HART_INVALID;	/* The hart we booted on. */

cpuset_t all_harts;

extern int *end;
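The cpu_startup() routine just below walks phys_avail[], the kernel's table of usable RAM: even slots hold a region's start address, odd slots hold its end, and a zero end entry terminates the list. A minimal userspace sketch of that traversal, with made-up addresses and names:

#include <stdint.h>
#include <stdio.h>

/* phys_avail-style layout: (start, end) pairs, zero end terminates. */
static uint64_t demo_avail[] = {
	0x80200000ULL,  0x88000000ULL,	/* 126 MB */
	0x100000000ULL, 0x140000000ULL,	/* 1 GB */
	0, 0				/* terminator */
};

int
main(void)
{
	for (int i = 0; demo_avail[i + 1] != 0; i += 2) {
		uint64_t size = demo_avail[i + 1] - demo_avail[i];
		printf("0x%016jx - 0x%016jx, %ju bytes\n",
		    (uintmax_t)demo_avail[i],
		    (uintmax_t)demo_avail[i + 1] - 1,
		    (uintmax_t)size);
	}
	return (0);
}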
static void
cpu_startup(void *dummy)
{

	sbi_print_version();
	identify_cpu();

	printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)realmem),
	    ptoa((uintmax_t)realmem) / (1024 * 1024));

	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %ju (%ju MB)\n",
	    ptoa((uintmax_t)vm_free_count()),
	    ptoa((uintmax_t)vm_free_count()) / (1024 * 1024));
	if (bootverbose)
		devmap_print_table();

	bufinit();
	vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

int
cpu_idle_wakeup(int cpu)
{

	return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	regs->sepc = frame->tf_sepc;
	regs->sstatus = frame->tf_sstatus;
	regs->ra = frame->tf_ra;
	regs->sp = frame->tf_sp;
	regs->gp = frame->tf_gp;
	regs->tp = frame->tf_tp;

	memcpy(regs->t, frame->tf_t, sizeof(regs->t));
	memcpy(regs->s, frame->tf_s, sizeof(regs->s));
	memcpy(regs->a, frame->tf_a, sizeof(regs->a));

	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *frame;

	frame = td->td_frame;
	frame->tf_sepc = regs->sepc;
	frame->tf_ra = regs->ra;
	frame->tf_sp = regs->sp;
	frame->tf_gp = regs->gp;
	frame->tf_tp = regs->tp;

	memcpy(frame->tf_t, regs->t, sizeof(frame->tf_t));
	memcpy(frame->tf_s, regs->s, sizeof(frame->tf_s));
	memcpy(frame->tf_a, regs->a, sizeof(frame->tf_a));

	return (0);
}

int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef FPE
	struct pcb *pcb;

	pcb = td->td_pcb;

	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running FPE instructions we will
		 * need to save the state to memcpy it below.
		 */
		if (td == curthread)
			fpe_state_save(td);

		memcpy(regs->fp_x, pcb->pcb_x, sizeof(regs->fp_x));
		regs->fp_fcsr = pcb->pcb_fcsr;
	} else
#endif
		memset(regs, 0, sizeof(*regs));

	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef FPE
	struct trapframe *frame;
	struct pcb *pcb;

	frame = td->td_frame;
	pcb = td->td_pcb;

	memcpy(pcb->pcb_x, regs->fp_x, sizeof(regs->fp_x));
	pcb->pcb_fcsr = regs->fp_fcsr;
	pcb->pcb_fpflags |= PCB_FP_STARTED;

	frame->tf_sstatus &= ~SSTATUS_FS_MASK;
	frame->tf_sstatus |= SSTATUS_FS_CLEAN;
#endif

	return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

	panic("fill_dbregs");
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

	panic("set_dbregs");
}

int
ptrace_set_pc(struct thread *td, u_long addr)
{

	td->td_frame->tf_sepc = addr;
	return (0);
}

int
ptrace_single_step(struct thread *td)
{

	/* TODO; */
	return (EOPNOTSUPP);
}

int
ptrace_clear_single_step(struct thread *td)
{

	/* TODO; */
	return (EOPNOTSUPP);
}

void
exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
{
	struct trapframe *tf;
	struct pcb *pcb;

	tf = td->td_frame;
	pcb = td->td_pcb;

	memset(tf, 0, sizeof(struct trapframe));

	tf->tf_a[0] = stack;
	tf->tf_sp = STACKALIGN(stack);
	tf->tf_ra = imgp->entry_addr;
	tf->tf_sepc = imgp->entry_addr;

	pcb->pcb_fpflags &= ~PCB_FP_STARTED;
}

/* Sanity check these are the same size, they will be memcpy'd to and fro */
CTASSERT(sizeof(((struct trapframe *)0)->tf_a) ==
    sizeof((struct gpregs *)0)->gp_a);
CTASSERT(sizeof(((struct trapframe *)0)->tf_s) ==
    sizeof((struct gpregs *)0)->gp_s);
CTASSERT(sizeof(((struct trapframe *)0)->tf_t) ==
    sizeof((struct gpregs *)0)->gp_t);
CTASSERT(sizeof(((struct trapframe *)0)->tf_a) ==
    sizeof((struct reg *)0)->a);
CTASSERT(sizeof(((struct trapframe *)0)->tf_s) ==
    sizeof((struct reg *)0)->s);
CTASSERT(sizeof(((struct trapframe *)0)->tf_t) ==
    sizeof((struct reg *)0)->t);

/* Support for FDT configurations only. */
CTASSERT(FDT);
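The CTASSERT block above is FreeBSD's compile-time assertion macro: if two register layouts that are memcpy'd into each other ever diverge in size, the build fails instead of the copy silently corrupting memory. In standard C11 the same guard can be written with static_assert; a minimal sketch with invented structure names:

#include <assert.h>	/* static_assert (C11) */
#include <string.h>

/* Two hypothetical views of the same register block. */
struct demo_trapframe { unsigned long tf_a[8]; };
struct demo_reg { unsigned long a[8]; };

/* Fails to compile if the arrays ever diverge, keeping memcpy safe. */
static_assert(sizeof(((struct demo_trapframe *)0)->tf_a) ==
    sizeof(((struct demo_reg *)0)->a), "register array size mismatch");

void
demo_fill(struct demo_reg *r, const struct demo_trapframe *tf)
{
	memcpy(r->a, tf->tf_a, sizeof(r->a));
}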
int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
	struct trapframe *tf = td->td_frame;

	memcpy(mcp->mc_gpregs.gp_t, tf->tf_t, sizeof(mcp->mc_gpregs.gp_t));
	memcpy(mcp->mc_gpregs.gp_s, tf->tf_s, sizeof(mcp->mc_gpregs.gp_s));
	memcpy(mcp->mc_gpregs.gp_a, tf->tf_a, sizeof(mcp->mc_gpregs.gp_a));

	if (clear_ret & GET_MC_CLEAR_RET) {
		mcp->mc_gpregs.gp_a[0] = 0;
		mcp->mc_gpregs.gp_t[0] = 0; /* clear syscall error */
	}

	mcp->mc_gpregs.gp_ra = tf->tf_ra;
	mcp->mc_gpregs.gp_sp = tf->tf_sp;
	mcp->mc_gpregs.gp_gp = tf->tf_gp;
	mcp->mc_gpregs.gp_tp = tf->tf_tp;
	mcp->mc_gpregs.gp_sepc = tf->tf_sepc;
	mcp->mc_gpregs.gp_sstatus = tf->tf_sstatus;

	get_fpcontext(td, mcp);

	return (0);
}

int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tf;

	tf = td->td_frame;

	/*
	 * Permit changes to the USTATUS bits of SSTATUS.
	 *
	 * Ignore writes to read-only bits (SD, XS).
	 *
	 * Ignore writes to the FS field as set_fpcontext() will set
	 * it explicitly.
	 */
	if (((mcp->mc_gpregs.gp_sstatus ^ tf->tf_sstatus) &
	    ~(SSTATUS_SD | SSTATUS_XS_MASK | SSTATUS_FS_MASK | SSTATUS_UPIE |
	    SSTATUS_UIE)) != 0)
		return (EINVAL);

	memcpy(tf->tf_t, mcp->mc_gpregs.gp_t, sizeof(tf->tf_t));
	memcpy(tf->tf_s, mcp->mc_gpregs.gp_s, sizeof(tf->tf_s));
	memcpy(tf->tf_a, mcp->mc_gpregs.gp_a, sizeof(tf->tf_a));

	tf->tf_ra = mcp->mc_gpregs.gp_ra;
	tf->tf_sp = mcp->mc_gpregs.gp_sp;
	tf->tf_gp = mcp->mc_gpregs.gp_gp;
	tf->tf_sepc = mcp->mc_gpregs.gp_sepc;
	tf->tf_sstatus = mcp->mc_gpregs.gp_sstatus;

	set_fpcontext(td, mcp);

	return (0);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef FPE
	struct pcb *curpcb;

	critical_enter();

	curpcb = curthread->td_pcb;

	KASSERT(td->td_pcb == curpcb, ("Invalid fpe pcb"));

	if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running FPE instructions we will
		 * need to save the state to memcpy it below.
		 */
		fpe_state_save(td);

		KASSERT((curpcb->pcb_fpflags & ~PCB_FP_USERMASK) == 0,
		    ("Non-userspace FPE flags set in get_fpcontext"));
		memcpy(mcp->mc_fpregs.fp_x, curpcb->pcb_x,
-		    sizeof(mcp->mc_fpregs));
+		    sizeof(mcp->mc_fpregs.fp_x));
		mcp->mc_fpregs.fp_fcsr = curpcb->pcb_fcsr;
		mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
		mcp->mc_flags |= _MC_FP_VALID;
	}

	critical_exit();
#endif
}

static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef FPE
	struct pcb *curpcb;
#endif

	td->td_frame->tf_sstatus &= ~SSTATUS_FS_MASK;
	td->td_frame->tf_sstatus |= SSTATUS_FS_OFF;

#ifdef FPE
	critical_enter();

	if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
		curpcb = curthread->td_pcb;
		/* FPE usage is enabled, override registers. */
		memcpy(curpcb->pcb_x, mcp->mc_fpregs.fp_x,
-		    sizeof(mcp->mc_fpregs));
+		    sizeof(mcp->mc_fpregs.fp_x));
		curpcb->pcb_fcsr = mcp->mc_fpregs.fp_fcsr;
		curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags & PCB_FP_USERMASK;
		td->td_frame->tf_sstatus |= SSTATUS_FS_CLEAN;
	}

	critical_exit();
#endif
}

void
cpu_idle(int busy)
{

	spinlock_enter();
	if (!busy)
		cpu_idleclock();
	if (!sched_runnable())
		__asm __volatile(
		    "fence \n"
		    "wfi   \n");
	if (!busy)
		cpu_activeclock();
	spinlock_exit();
}

void
cpu_halt(void)
{

	/*
	 * Try to power down using the HSM SBI extension and fall back to a
	 * simple wfi loop.
	 */
	intr_disable();
	if (sbi_probe_extension(SBI_EXT_ID_HSM) != 0)
		sbi_hsm_hart_stop();
	for (;;)
		__asm __volatile("wfi");
	/* NOTREACHED */
}
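Both hunks above fix the same bug: the memcpy length was sizeof(mcp->mc_fpregs), the whole fpregs structure (register array plus fcsr, flags, and padding), when only the fp_x register array should be copied, so the copy read and wrote past the bounds of the fp_x arrays. A standalone sketch of the pitfall, with an invented structure layout:

#include <stdio.h>

struct demo_fpregs {
	unsigned long long fp_x[32][2];	/* register file */
	unsigned long fp_fcsr;		/* control/status */
	unsigned long fp_flags;
};

int
main(void)
{
	struct demo_fpregs r;

	/*
	 * sizeof(r) counts fcsr, flags, and any padding as well, so using
	 * it as the length for a copy of r.fp_x alone overruns the array
	 * on both the source and destination sides.
	 */
	printf("sizeof(r)      = %zu\n", sizeof(r));
	printf("sizeof(r.fp_x) = %zu  (the correct copy length)\n",
	    sizeof(r.fp_x));
	return (0);
}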
/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

	/* TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

	panic("cpu_est_clockrate");
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

void
spinlock_enter(void)
{
	struct thread *td;
	register_t reg;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		reg = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_sstatus_ie = reg;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}

void
spinlock_exit(void)
{
	struct thread *td;
	register_t sstatus_ie;

	td = curthread;
	sstatus_ie = td->td_md.md_saved_sstatus_ie;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0) {
		critical_exit();
		intr_restore(sstatus_ie);
	}
}

#ifndef	_SYS_SYSPROTO_H_
struct sigreturn_args {
	ucontext_t *ucp;
};
#endif

int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
	ucontext_t uc;
	int error;

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
		return (EFAULT);

	error = set_mcontext(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	/* Restore signal mask. */
	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

	return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	memcpy(pcb->pcb_s, tf->tf_s, sizeof(tf->tf_s));

	pcb->pcb_ra = tf->tf_sepc;
	pcb->pcb_sp = tf->tf_sp;
	pcb->pcb_gp = tf->tf_gp;
	pcb->pcb_tp = tf->tf_tp;
}
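spinlock_enter() and spinlock_exit() above keep a per-thread recursion counter so that only the outermost enter disables interrupts (saving the previous sstatus) and only the matching outermost exit restores it; nested acquisitions just bump the counter. A userspace sketch of the same pattern, with invented names standing in for the interrupt primitives:

#include <stdio.h>

static int irq_on = 1;	/* stand-in for the CPU interrupt-enable state */
static int lock_count;	/* per-thread nesting depth, as td_md.md_spinlock_count */
static int saved_irq;	/* state saved by the outermost enter */

static void
demo_spinlock_enter(void)
{
	if (lock_count == 0) {
		saved_irq = irq_on;	/* outermost: save and disable */
		irq_on = 0;
		lock_count = 1;
	} else
		lock_count++;		/* nested: only bump the counter */
}

static void
demo_spinlock_exit(void)
{
	if (--lock_count == 0)
		irq_on = saved_irq;	/* outermost exit: restore saved state */
}

int
main(void)
{
	demo_spinlock_enter();
	demo_spinlock_enter();	/* nested acquire is cheap */
	demo_spinlock_exit();
	printf("after inner exit: irq_on=%d (still off)\n", irq_on);
	demo_spinlock_exit();
	printf("after outer exit: irq_on=%d (restored)\n", irq_on);
	return (0);
}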
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe *fp, frame;
	struct sysentvec *sysent;
	struct trapframe *tf;
	struct sigacts *psp;
	struct thread *td;
	struct proc *p;
	int onstack;
	int sig;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);

	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	tf = td->td_frame;
	onstack = sigonstack(tf->tf_sp);

	CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
	    catcher, sig);

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
	} else {
		fp = (struct sigframe *)td->td_frame->tf_sp;
	}

	/* Make room, keeping the stack aligned */
	fp--;
	fp = (struct sigframe *)STACKALIGN(fp);

	/* Fill in the frame to copy out */
	bzero(&frame, sizeof(frame));
	get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
	frame.sf_si = ksi->ksi_info;
	frame.sf_uc.uc_sigmask = *mask;
	frame.sf_uc.uc_stack = td->td_sigstk;
	frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) != 0 ?
	    (onstack ? SS_ONSTACK : 0) : SS_DISABLE;
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(td->td_proc);

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&frame, fp, sizeof(*fp)) != 0) {
		/* Process has trashed its stack. Kill it. */
		CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	tf->tf_a[0] = sig;
	tf->tf_a[1] = (register_t)&fp->sf_si;
	tf->tf_a[2] = (register_t)&fp->sf_uc;

	tf->tf_sepc = (register_t)catcher;
	tf->tf_sp = (register_t)fp;

	sysent = p->p_sysent;
	if (sysent->sv_sigcode_base != 0)
		tf->tf_ra = (register_t)sysent->sv_sigcode_base;
	else
		tf->tf_ra = (register_t)(sysent->sv_psstrings -
		    *(sysent->sv_szsigcode));

	CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_sepc,
	    tf->tf_sp);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}

static void
init_proc0(vm_offset_t kstack)
{
	struct pcpu *pcpup;

	pcpup = &__pcpu[0];

	proc_linkup0(&proc0, &thread0);
	thread0.td_kstack = kstack;
	thread0.td_kstack_pages = KSTACK_PAGES;
	thread0.td_pcb = (struct pcb *)(thread0.td_kstack +
	    thread0.td_kstack_pages * PAGE_SIZE) - 1;
	thread0.td_pcb->pcb_fpflags = 0;
	thread0.td_frame = &proc0_tf;
	pcpup->pc_curpcb = thread0.td_pcb;
}

#ifdef FDT
static void
try_load_dtb(caddr_t kmdp)
{
	vm_offset_t dtbp;

	dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
#if defined(FDT_DTB_STATIC)
	/*
	 * In case the device tree blob was not retrieved (from metadata) try
	 * to use the statically embedded one.
	 */
	if (dtbp == (vm_offset_t)NULL)
		dtbp = (vm_offset_t)&fdt_static_dtb;
#endif

	if (dtbp == (vm_offset_t)NULL) {
		printf("ERROR loading DTB\n");
		return;
	}

	if (OF_install(OFW_FDT, 0) == FALSE)
		panic("Cannot install FDT");

	if (OF_init((void *)dtbp) != 0)
		panic("OF_init failed with the found device tree");
}
#endif

static void
cache_setup(void)
{
	/* TODO */

	dcache_line_size = 0;
	icache_line_size = 0;
	idcache_line_size = 0;
}
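In sendsig() above, the frame pointer is first decremented to reserve room for the sigframe and then rounded down with STACKALIGN so the frame meets the ABI's stack alignment. A sketch of that down-alignment, assuming a 16-byte requirement (the macro's real definition lives in the machine headers, and the names here are invented):

#include <stdint.h>
#include <stdio.h>

/* Round a stack address down to a 16-byte boundary (assumed alignment). */
#define	DEMO_STACKALIGN(p)	((uintptr_t)(p) & ~(uintptr_t)15)

struct demo_sigframe { char bytes[200]; };	/* invented size */

int
main(void)
{
	uintptr_t sp = 0x7fffffff1234;		/* arbitrary user stack pointer */
	struct demo_sigframe *fp;

	fp = (struct demo_sigframe *)sp;
	fp--;					/* make room for the frame */
	fp = (struct demo_sigframe *)DEMO_STACKALIGN(fp);

	printf("frame at %#jx, aligned below the old stack pointer\n",
	    (uintmax_t)(uintptr_t)fp);
	return (0);
}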
/*
 * Fake up a boot descriptor table.
 */
static void
fake_preload_metadata(struct riscv_bootparams *rvbp)
{
	static uint32_t fake_preload[48];
	vm_offset_t lastaddr;
	size_t fake_size, dtb_size;

#define	PRELOAD_PUSH_VALUE(type, value) do {			\
	*(type *)((char *)fake_preload + fake_size) = (value);	\
	fake_size += sizeof(type);				\
} while (0)

#define	PRELOAD_PUSH_STRING(str) do {				\
	uint32_t ssize;						\
	ssize = strlen(str) + 1;				\
	PRELOAD_PUSH_VALUE(uint32_t, ssize);			\
	strcpy(((char *)fake_preload + fake_size), str);	\
	fake_size += ssize;					\
	fake_size = roundup(fake_size, sizeof(u_long));		\
} while (0)

	fake_size = 0;
	lastaddr = (vm_offset_t)&end;

	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_NAME);
	PRELOAD_PUSH_STRING("kernel");
	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_TYPE);
	PRELOAD_PUSH_STRING("elf kernel");

	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_ADDR);
	PRELOAD_PUSH_VALUE(uint32_t, sizeof(vm_offset_t));
	PRELOAD_PUSH_VALUE(uint64_t, KERNBASE);

	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_SIZE);
	PRELOAD_PUSH_VALUE(uint32_t, sizeof(size_t));
	PRELOAD_PUSH_VALUE(uint64_t, (size_t)((vm_offset_t)&end - KERNBASE));

	/* Copy the DTB to KVA space. */
	lastaddr = roundup(lastaddr, sizeof(int));
	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_METADATA | MODINFOMD_DTBP);
	PRELOAD_PUSH_VALUE(uint32_t, sizeof(vm_offset_t));
	PRELOAD_PUSH_VALUE(vm_offset_t, lastaddr);
	dtb_size = fdt_totalsize(rvbp->dtbp_virt);
	memmove((void *)lastaddr, (const void *)rvbp->dtbp_virt, dtb_size);
	lastaddr = roundup(lastaddr + dtb_size, sizeof(int));

	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_METADATA | MODINFOMD_KERNEND);
	PRELOAD_PUSH_VALUE(uint32_t, sizeof(vm_offset_t));
	PRELOAD_PUSH_VALUE(vm_offset_t, lastaddr);

	PRELOAD_PUSH_VALUE(uint32_t, MODINFO_METADATA | MODINFOMD_HOWTO);
	PRELOAD_PUSH_VALUE(uint32_t, sizeof(int));
	PRELOAD_PUSH_VALUE(int, RB_VERBOSE);

	/* End marker */
	PRELOAD_PUSH_VALUE(uint32_t, 0);
	PRELOAD_PUSH_VALUE(uint32_t, 0);

	preload_metadata = (caddr_t)fake_preload;

	/* Check if bootloader clobbered part of the kernel with the DTB. */
	KASSERT(rvbp->dtbp_phys + dtb_size <= rvbp->kern_phys ||
	    rvbp->dtbp_phys >= rvbp->kern_phys + (lastaddr - KERNBASE),
	    ("FDT (%lx-%lx) and kernel (%lx-%lx) overlap",
	    rvbp->dtbp_phys, rvbp->dtbp_phys + dtb_size, rvbp->kern_phys,
	    rvbp->kern_phys + (lastaddr - KERNBASE)));
	KASSERT(fake_size < sizeof(fake_preload),
	    ("Too many fake_preload items"));

	if (boothowto & RB_VERBOSE)
		printf("FDT phys (%lx-%lx), kernel phys (%lx-%lx)\n",
		    rvbp->dtbp_phys, rvbp->dtbp_phys + dtb_size,
		    rvbp->kern_phys, rvbp->kern_phys + (lastaddr - KERNBASE));
}

#ifdef FDT
static void
parse_fdt_bootargs(void)
{
	char bootargs[512];

	bootargs[sizeof(bootargs) - 1] = '\0';
	if (fdt_get_chosen_bootargs(bootargs, sizeof(bootargs) - 1) == 0) {
		boothowto |= boot_parse_cmdline(bootargs);
	}
}
#endif

static vm_offset_t
parse_metadata(void)
{
	caddr_t kmdp;
	vm_offset_t lastaddr;
#ifdef DDB
	vm_offset_t ksym_start, ksym_end;
#endif
	char *kern_envp;

	/* Find the kernel address */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");
	KASSERT(kmdp != NULL, ("No preload metadata found!"));

	/* Read the boot metadata */
	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
	kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
	if (kern_envp != NULL)
		init_static_kenv(kern_envp, 0);

#ifdef DDB
	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
	db_fetch_ksymtab(ksym_start, ksym_end);
#endif

#ifdef FDT
	try_load_dtb(kmdp);
	if (kern_envp == NULL)
		parse_fdt_bootargs();
#endif

	return (lastaddr);
}
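fake_preload_metadata() above emits the loader's type-length-value record stream that parse_metadata() later walks with preload_search_by_type() and MD_FETCH(). A minimal userspace sketch of building and scanning such a stream; the tag value and exact layout are simplified for illustration and do not match the real MODINFO encoding in detail:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define	DEMO_MODINFO_SIZE	0x0004	/* invented tag value */

static uint32_t stream[64];
static size_t off;

static void
push_u32(uint32_t v)
{
	memcpy((char *)stream + off, &v, sizeof(v));
	off += sizeof(v);
}

int
main(void)
{
	/* Build: tag, payload length, payload; a (0, 0) pair terminates. */
	push_u32(DEMO_MODINFO_SIZE);
	push_u32(sizeof(uint32_t));
	push_u32(123456);		/* fake kernel size */
	push_u32(0);
	push_u32(0);

	/* Scan for a tag, skipping other records by their stored length. */
	for (size_t i = 0; stream[i] != 0; ) {
		uint32_t tag = stream[i], len = stream[i + 1];

		if (tag == DEMO_MODINFO_SIZE)
			printf("kernel size record: %u\n", stream[i + 2]);
		i += 2 + (len + sizeof(uint32_t) - 1) / sizeof(uint32_t);
	}
	return (0);
}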
void
initriscv(struct riscv_bootparams *rvbp)
{
	struct mem_region mem_regions[FDT_MEM_REGIONS];
	struct pcpu *pcpup;
	int mem_regions_sz;
	vm_offset_t lastaddr;
	vm_size_t kernlen;
#ifdef FDT
	phandle_t chosen;
	uint32_t hart;
#endif

	TSRAW(&thread0, TS_ENTER, __func__, NULL);

	/* Set the pcpu data, this is needed by pmap_bootstrap */
	pcpup = &__pcpu[0];
	pcpu_init(pcpup, 0, sizeof(struct pcpu));

	/* Set the pcpu pointer */
	__asm __volatile("mv tp, %0" :: "r"(pcpup));

	PCPU_SET(curthread, &thread0);

	/* Initialize SBI interface. */
	sbi_init();

	/* Parse the boot metadata. */
	if (rvbp->modulep != 0) {
		preload_metadata = (caddr_t)rvbp->modulep;
	} else {
		fake_preload_metadata(rvbp);
	}
	lastaddr = parse_metadata();

#ifdef FDT
	/*
	 * Look for the boot hart ID. This was either passed in directly from
	 * the SBI firmware and handled by locore, or was stored in the device
	 * tree by an earlier boot stage.
	 */
	chosen = OF_finddevice("/chosen");
	if (OF_getencprop(chosen, "boot-hartid", &hart, sizeof(hart)) != -1) {
		boot_hart = hart;
	}
#endif
	if (boot_hart == BOOT_HART_INVALID) {
		panic("Boot hart ID was not properly set");
	}
	pcpup->pc_hart = boot_hart;

#ifdef FDT
	/*
	 * Exclude reserved memory specified by the device tree. Typically,
	 * this contains an entry for memory used by the runtime SBI firmware.
	 */
	if (fdt_get_reserved_mem(mem_regions, &mem_regions_sz) == 0) {
		physmem_exclude_regions(mem_regions, mem_regions_sz,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
	}

	/* Grab physical memory regions information from device tree. */
	if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, NULL) != 0) {
		panic("Cannot get physical memory regions");
	}
	physmem_hardware_regions(mem_regions, mem_regions_sz);
#endif

	/* Do basic tuning, hz etc */
	init_param1();

	cache_setup();

	/* Bootstrap enough of pmap to enter the kernel proper */
	kernlen = (lastaddr - KERNBASE);
	pmap_bootstrap(rvbp->kern_l1pt, rvbp->kern_phys, kernlen);

#ifdef FDT
	/*
	 * XXX: Exclude the lowest 2MB of physical memory, if it hasn't been
	 * done already, as this area is assumed to contain the SBI firmware.
	 * This is a little fragile, but it is consistent with the platforms
	 * we support so far.
	 *
	 * TODO: remove this when all the regular booting methods properly
	 * report their reserved memory in the device tree.
	 */
	if (mem_regions[0].mr_start == physmap[0]) {
		physmem_exclude_region(mem_regions[0].mr_start, L2_SIZE,
		    EXFLAG_NODUMP | EXFLAG_NOALLOC);
	}
#endif
	physmem_init_kernel_globals();

	/* Establish static device mappings */
	devmap_bootstrap(0, NULL);

	cninit();

	init_proc0(rvbp->kern_stack);

	msgbufinit(msgbufp, msgbufsize);
	mutex_init();
	init_param2(physmem);
	kdb_init();

	if (boothowto & RB_VERBOSE)
		physmem_print_tables();

	early_boot = 0;

	TSEXIT();
}

#undef bzero
void
bzero(void *buf, size_t len)
{
	uint8_t *p;

	p = buf;
	while (len-- > 0)
		*p++ = 0;
}
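initriscv() reads the boot hart ID from the /chosen node through the kernel's OFW layer. Outside the kernel, the same lookup can be done against a flattened device tree blob with plain libfdt; a sketch assuming a valid DTB in memory, using standard libfdt calls (compile with -lfdt from the dtc distribution):

#include <libfdt.h>
#include <stdint.h>

/* Returns the hart ID from /chosen/boot-hartid, or -1 if absent. */
static long
read_boot_hartid(const void *fdt)
{
	const fdt32_t *prop;
	int node, len;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		return (-1);
	prop = fdt_getprop(fdt, node, "boot-hartid", &len);
	if (prop == NULL || len != sizeof(*prop))
		return (-1);
	return ((long)fdt32_to_cpu(*prop));	/* DTB properties are big-endian */
}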