Index: head/sys/riscv/include/sbi.h
===================================================================
--- head/sys/riscv/include/sbi.h	(revision 367066)
+++ head/sys/riscv/include/sbi.h	(revision 367067)
@@ -1,242 +1,235 @@
/*-
 * Copyright (c) 2016-2017 Ruslan Bukin
 * All rights reserved.
 * Copyright (c) 2019 Mitchell Horne
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by the University of Cambridge
 * Computer Laboratory as part of the CTSRD Project, with support from the
 * UK Higher Education Innovation Fund (HEIF).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef	_MACHINE_SBI_H_
#define	_MACHINE_SBI_H_

/* SBI Specification Version */
#define	SBI_SPEC_VERS_MAJOR_OFFSET	24
#define	SBI_SPEC_VERS_MAJOR_MASK	(0x7F << SBI_SPEC_VERS_MAJOR_OFFSET)
#define	SBI_SPEC_VERS_MINOR_OFFSET	0
#define	SBI_SPEC_VERS_MINOR_MASK	(0xFFFFFF << SBI_SPEC_VERS_MINOR_OFFSET)

/* SBI Implementation IDs */
#define	SBI_IMPL_ID_BBL			0
#define	SBI_IMPL_ID_OPENSBI		1

/* SBI Error Codes */
#define	SBI_SUCCESS			0
#define	SBI_ERR_FAILURE			-1
#define	SBI_ERR_NOT_SUPPORTED		-2
#define	SBI_ERR_INVALID_PARAM		-3
#define	SBI_ERR_DENIED			-4
#define	SBI_ERR_INVALID_ADDRESS		-5
#define	SBI_ERR_ALREADY_AVAILABLE	-6

/* SBI Base Extension */
#define	SBI_EXT_ID_BASE			0x10
#define	SBI_BASE_GET_SPEC_VERSION	0
#define	SBI_BASE_GET_IMPL_ID		1
#define	SBI_BASE_GET_IMPL_VERSION	2
#define	SBI_BASE_PROBE_EXTENSION	3
#define	SBI_BASE_GET_MVENDORID		4
#define	SBI_BASE_GET_MARCHID		5
#define	SBI_BASE_GET_MIMPID		6

/* Hart State Management (HSM) Extension */
#define	SBI_EXT_ID_HSM			0x48534D
#define	SBI_HSM_HART_START		0
#define	SBI_HSM_HART_STOP		1
#define	SBI_HSM_HART_STATUS		2
#define	SBI_HSM_STATUS_STARTED		0
#define	SBI_HSM_STATUS_STOPPED		1
#define	SBI_HSM_STATUS_START_PENDING	2
#define	SBI_HSM_STATUS_STOP_PENDING	3

/* Legacy Extensions */
#define	SBI_SET_TIMER			0
#define	SBI_CONSOLE_PUTCHAR		1
#define	SBI_CONSOLE_GETCHAR		2
#define	SBI_CLEAR_IPI			3
#define	SBI_SEND_IPI			4
#define	SBI_REMOTE_FENCE_I		5
#define	SBI_REMOTE_SFENCE_VMA		6
#define	SBI_REMOTE_SFENCE_VMA_ASID	7
#define	SBI_SHUTDOWN			8

#define	SBI_CALL0(e, f)			SBI_CALL4(e, f, 0, 0, 0, 0)
#define	SBI_CALL1(e, f, p1)		SBI_CALL4(e, f, p1, 0, 0, 0)
#define	SBI_CALL2(e, f, p1, p2)		SBI_CALL4(e, f, p1, p2, 0, 0)
#define	SBI_CALL3(e, f, p1, p2, p3)	SBI_CALL4(e, f, p1, p2, p3, 0)
#define	SBI_CALL4(e, f, p1, p2, p3, p4)	sbi_call(e, f, p1, p2, p3, p4)

/*
 * Documentation available at
 * https://github.com/riscv/riscv-sbi-doc/blob/master/riscv-sbi.adoc
 */

struct sbi_ret {
	long error;
	long value;
};

static __inline struct sbi_ret
sbi_call(uint64_t arg7, uint64_t arg6, uint64_t arg0, uint64_t arg1,
    uint64_t arg2, uint64_t arg3)
{
	struct sbi_ret ret;

	register uintptr_t a0 __asm ("a0") = (uintptr_t)(arg0);
	register uintptr_t a1 __asm ("a1") = (uintptr_t)(arg1);
	register uintptr_t a2 __asm ("a2") = (uintptr_t)(arg2);
	register uintptr_t a3 __asm ("a3") = (uintptr_t)(arg3);
	register uintptr_t a6 __asm ("a6") = (uintptr_t)(arg6);
	register uintptr_t a7 __asm ("a7") = (uintptr_t)(arg7);

	__asm __volatile(			\
		"ecall"				\
		:"+r"(a0), "+r"(a1)		\
		:"r"(a2), "r"(a3), "r"(a6), "r"(a7) \
		:"memory");

	ret.error = a0;
	ret.value = a1;
	return (ret);
}

/* Base extension functions and variables. */
extern u_long sbi_spec_version;
extern u_long sbi_impl_id;
extern u_long sbi_impl_version;

static __inline long
sbi_probe_extension(long id)
{
	return (SBI_CALL1(SBI_EXT_ID_BASE, SBI_BASE_PROBE_EXTENSION, id).value);
}

/* Hart State Management extension functions. */

/*
 * Start execution on the specified hart at physical address start_addr. The
 * register a0 will contain the hart's ID, and a1 will contain the value of
 * priv.
 */
int sbi_hsm_hart_start(u_long hart, u_long start_addr, u_long priv);

/*
 * Stop execution on the current hart. Interrupts should be disabled, or this
 * function may return.
 */
void sbi_hsm_hart_stop(void);

/*
 * Get the execution status of the specified hart. The status will be one of:
 *  - SBI_HSM_STATUS_STARTED
 *  - SBI_HSM_STATUS_STOPPED
 *  - SBI_HSM_STATUS_START_PENDING
 *  - SBI_HSM_STATUS_STOP_PENDING
 */
int sbi_hsm_hart_status(u_long hart);

/* Legacy extension functions. */
static __inline void
sbi_console_putchar(int ch)
{

	(void)SBI_CALL1(SBI_CONSOLE_PUTCHAR, 0, ch);
}

static __inline int
sbi_console_getchar(void)
{

	/*
	 * XXX: The "error" is returned here because legacy SBI functions
	 * continue to return their value in a0.
	 */
	return (SBI_CALL0(SBI_CONSOLE_GETCHAR, 0).error);
}

static __inline void
sbi_set_timer(uint64_t val)
{

	(void)SBI_CALL1(SBI_SET_TIMER, 0, val);
}

static __inline void
sbi_shutdown(void)
{

	(void)SBI_CALL0(SBI_SHUTDOWN, 0);
}

static __inline void
-sbi_clear_ipi(void)
-{
-
-	(void)SBI_CALL0(SBI_CLEAR_IPI, 0);
-}
-
-static __inline void
sbi_send_ipi(const unsigned long *hart_mask)
{

	(void)SBI_CALL1(SBI_SEND_IPI, 0, (uint64_t)hart_mask);
}

static __inline void
sbi_remote_fence_i(const unsigned long *hart_mask)
{

	(void)SBI_CALL1(SBI_REMOTE_FENCE_I, 0, (uint64_t)hart_mask);
}

static __inline void
sbi_remote_sfence_vma(const unsigned long *hart_mask,
    unsigned long start, unsigned long size)
{

	(void)SBI_CALL3(SBI_REMOTE_SFENCE_VMA, 0, (uint64_t)hart_mask,
	    start, size);
}

static __inline void
sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
    unsigned long start, unsigned long size,
    unsigned long asid)
{

	(void)SBI_CALL4(SBI_REMOTE_SFENCE_VMA_ASID, 0, (uint64_t)hart_mask,
	    start, size, asid);
}

void sbi_print_version(void);
void sbi_init(void);

#endif /* !_MACHINE_SBI_H_ */

Index: head/sys/riscv/riscv/mp_machdep.c
===================================================================
--- head/sys/riscv/riscv/mp_machdep.c	(revision 367066)
+++ head/sys/riscv/riscv/mp_machdep.c	(revision 367067)
@@ -1,550 +1,550 @@
/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 * Copyright (c) 2016 Ruslan Bukin
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by the University of Cambridge
 * Computer Laboratory as part of the CTSRD Project, with support from the
 * UK Higher Education Innovation Fund (HEIF).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
*/ #include "opt_kstack_pages.h" #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #endif boolean_t ofw_cpu_reg(phandle_t node, u_int, cell_t *); uint32_t __riscv_boot_ap[MAXCPU]; static enum { CPUS_UNKNOWN, #ifdef FDT CPUS_FDT, #endif } cpu_enum_method; static device_identify_t riscv64_cpu_identify; static device_probe_t riscv64_cpu_probe; static device_attach_t riscv64_cpu_attach; static int ipi_handler(void *); struct pcb stoppcbs[MAXCPU]; extern uint32_t boot_hart; extern cpuset_t all_harts; #ifdef INVARIANTS static uint32_t cpu_reg[MAXCPU][2]; #endif static device_t cpu_list[MAXCPU]; void mpentry(u_long hartid); void init_secondary(uint64_t); static struct mtx ap_boot_mtx; /* Stacks for AP initialization, discarded once idle threads are started. */ void *bootstack; static void *bootstacks[MAXCPU]; /* Count of started APs, used to synchronize access to bootstack. */ static volatile int aps_started; /* Set to 1 once we're ready to let the APs out of the pen. */ static volatile int aps_ready; /* Temporary variables for init_secondary() */ void *dpcpu[MAXCPU - 1]; static device_method_t riscv64_cpu_methods[] = { /* Device interface */ DEVMETHOD(device_identify, riscv64_cpu_identify), DEVMETHOD(device_probe, riscv64_cpu_probe), DEVMETHOD(device_attach, riscv64_cpu_attach), DEVMETHOD_END }; static devclass_t riscv64_cpu_devclass; static driver_t riscv64_cpu_driver = { "riscv64_cpu", riscv64_cpu_methods, 0 }; DRIVER_MODULE(riscv64_cpu, cpu, riscv64_cpu_driver, riscv64_cpu_devclass, 0, 0); static void riscv64_cpu_identify(driver_t *driver, device_t parent) { if (device_find_child(parent, "riscv64_cpu", -1) != NULL) return; if (BUS_ADD_CHILD(parent, 0, "riscv64_cpu", -1) == NULL) device_printf(parent, "add child failed\n"); } static int riscv64_cpu_probe(device_t dev) { u_int cpuid; cpuid = device_get_unit(dev); if (cpuid >= MAXCPU || cpuid > mp_maxid) return (EINVAL); device_quiet(dev); return (0); } static int riscv64_cpu_attach(device_t dev) { const uint32_t *reg; size_t reg_size; u_int cpuid; int i; cpuid = device_get_unit(dev); if (cpuid >= MAXCPU || cpuid > mp_maxid) return (EINVAL); KASSERT(cpu_list[cpuid] == NULL, ("Already have cpu %u", cpuid)); reg = cpu_get_cpuid(dev, ®_size); if (reg == NULL) return (EINVAL); if (bootverbose) { device_printf(dev, "register <"); for (i = 0; i < reg_size; i++) printf("%s%x", (i == 0) ? 
"" : " ", reg[i]); printf(">\n"); } /* Set the device to start it later */ cpu_list[cpuid] = dev; return (0); } static void release_aps(void *dummy __unused) { cpuset_t mask; int i; if (mp_ncpus == 1) return; /* Setup the IPI handler */ riscv_setup_ipihandler(ipi_handler); atomic_store_rel_int(&aps_ready, 1); /* Wake up the other CPUs */ mask = all_harts; CPU_CLR(boot_hart, &mask); printf("Release APs\n"); sbi_send_ipi(mask.__bits); for (i = 0; i < 2000; i++) { if (smp_started) return; DELAY(1000); } printf("APs not started\n"); } SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL); void init_secondary(uint64_t hart) { struct pcpu *pcpup; u_int cpuid; /* Renumber this cpu */ cpuid = hart; if (cpuid < boot_hart) cpuid += mp_maxid + 1; cpuid -= boot_hart; /* Setup the pcpu pointer */ pcpup = &__pcpu[cpuid]; __asm __volatile("mv tp, %0" :: "r"(pcpup)); /* Workaround: make sure wfi doesn't halt the hart */ csr_set(sie, SIE_SSIE); csr_set(sip, SIE_SSIE); /* Signal the BSP and spin until it has released all APs. */ atomic_add_int(&aps_started, 1); while (!atomic_load_int(&aps_ready)) __asm __volatile("wfi"); /* Initialize curthread */ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); pcpup->pc_curthread = pcpup->pc_idlethread; /* * Identify current CPU. This is necessary to setup * affinity registers and to provide support for * runtime chip identification. */ identify_cpu(); /* Enable software interrupts */ riscv_unmask_ipi(); #ifndef EARLY_AP_STARTUP /* Start per-CPU event timers. */ cpu_initclocks_ap(); #endif /* Enable external (PLIC) interrupts */ csr_set(sie, SIE_SEIE); /* Activate process 0's pmap. */ pmap_activate_boot(vmspace_pmap(proc0.p_vmspace)); mtx_lock_spin(&ap_boot_mtx); atomic_add_rel_32(&smp_cpus, 1); if (smp_cpus == mp_ncpus) { /* enable IPI's, tlb shootdown, freezes etc */ atomic_store_rel_int(&smp_started, 1); } mtx_unlock_spin(&ap_boot_mtx); /* * Assert that smp_after_idle_runnable condition is reasonable. */ MPASS(PCPU_GET(curpcb) == NULL); /* Enter the scheduler */ sched_throw(NULL); panic("scheduler returned us to init_secondary"); /* NOTREACHED */ } static void smp_after_idle_runnable(void *arg __unused) { struct pcpu *pc; int cpu; for (cpu = 1; cpu <= mp_maxid; cpu++) { if (bootstacks[cpu] != NULL) { pc = pcpu_find(cpu); while (atomic_load_ptr(&pc->pc_curpcb) == NULL) cpu_spinwait(); kmem_free((vm_offset_t)bootstacks[cpu], PAGE_SIZE); } } } SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY, smp_after_idle_runnable, NULL); static int ipi_handler(void *arg) { u_int ipi_bitmap; u_int cpu, ipi; int bit; - sbi_clear_ipi(); + csr_clear(sip, SIP_SSIP); cpu = PCPU_GET(cpuid); mb(); ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis)); if (ipi_bitmap == 0) return (FILTER_HANDLED); while ((bit = ffs(ipi_bitmap))) { bit = (bit - 1); ipi = (1 << bit); ipi_bitmap &= ~ipi; mb(); switch (ipi) { case IPI_AST: CTR0(KTR_SMP, "IPI_AST"); break; case IPI_PREEMPT: CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(curthread); break; case IPI_RENDEZVOUS: CTR0(KTR_SMP, "IPI_RENDEZVOUS"); smp_rendezvous_action(); break; case IPI_STOP: case IPI_STOP_HARD: CTR0(KTR_SMP, (ipi == IPI_STOP) ? 
"IPI_STOP" : "IPI_STOP_HARD"); savectx(&stoppcbs[cpu]); /* Indicate we are stopped */ CPU_SET_ATOMIC(cpu, &stopped_cpus); /* Wait for restart */ while (!CPU_ISSET(cpu, &started_cpus)) cpu_spinwait(); CPU_CLR_ATOMIC(cpu, &started_cpus); CPU_CLR_ATOMIC(cpu, &stopped_cpus); CTR0(KTR_SMP, "IPI_STOP (restart)"); /* * The kernel debugger might have set a breakpoint, * so flush the instruction cache. */ fence_i(); break; case IPI_HARDCLOCK: CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); break; default: panic("Unknown IPI %#0x on cpu %d", ipi, curcpu); } } return (FILTER_HANDLED); } struct cpu_group * cpu_topo(void) { return (smp_topo_none()); } /* Determine if we running MP machine */ int cpu_mp_probe(void) { return (mp_ncpus > 1); } #ifdef FDT static boolean_t cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg) { struct pcpu *pcpup; vm_paddr_t start_addr; uint64_t hart; u_int cpuid; int naps; int error; /* Check if this hart supports MMU. */ if (OF_getproplen(node, "mmu-type") < 0) return (0); KASSERT(id < MAXCPU, ("Too many CPUs")); KASSERT(addr_size == 1 || addr_size == 2, ("Invalid register size")); #ifdef INVARIANTS cpu_reg[id][0] = reg[0]; if (addr_size == 2) cpu_reg[id][1] = reg[1]; #endif hart = reg[0]; if (addr_size == 2) { hart <<= 32; hart |= reg[1]; } KASSERT(hart < MAXCPU, ("Too many harts.")); /* We are already running on this cpu */ if (hart == boot_hart) return (1); /* * Rotate the CPU IDs to put the boot CPU as CPU 0. * We keep the other CPUs ordered. */ cpuid = hart; if (cpuid < boot_hart) cpuid += mp_maxid + 1; cpuid -= boot_hart; /* Check if we are able to start this cpu */ if (cpuid > mp_maxid) return (0); /* * Depending on the SBI implementation, APs are waiting either in * locore.S or to be activated explicitly, via SBI call. */ if (sbi_probe_extension(SBI_EXT_ID_HSM) != 0) { start_addr = pmap_kextract((vm_offset_t)mpentry); error = sbi_hsm_hart_start(hart, start_addr, 0); if (error != 0) { mp_ncpus--; /* Send a warning to the user and continue. */ printf("AP %u (hart %lu) failed to start, error %d\n", cpuid, hart, error); return (0); } } pcpup = &__pcpu[cpuid]; pcpu_init(pcpup, cpuid, sizeof(struct pcpu)); pcpup->pc_hart = hart; dpcpu[cpuid - 1] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO); dpcpu_init(dpcpu[cpuid - 1], cpuid); bootstacks[cpuid] = (void *)kmem_malloc(PAGE_SIZE, M_WAITOK | M_ZERO); naps = atomic_load_int(&aps_started); bootstack = (char *)bootstacks[cpuid] + PAGE_SIZE; printf("Starting CPU %u (hart %lx)\n", cpuid, hart); atomic_store_32(&__riscv_boot_ap[hart], 1); /* Wait for the AP to switch to its boot stack. */ while (atomic_load_int(&aps_started) < naps + 1) cpu_spinwait(); CPU_SET(cpuid, &all_cpus); CPU_SET(hart, &all_harts); return (1); } #endif /* Initialize and fire up non-boot processors */ void cpu_mp_start(void) { mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); CPU_SET(0, &all_cpus); CPU_SET(boot_hart, &all_harts); switch(cpu_enum_method) { #ifdef FDT case CPUS_FDT: ofw_cpu_early_foreach(cpu_init_fdt, true); break; #endif case CPUS_UNKNOWN: break; } } /* Introduce rest of cores to the world */ void cpu_mp_announce(void) { } static boolean_t cpu_check_mmu(u_int id, phandle_t node, u_int addr_size, pcell_t *reg) { /* Check if this hart supports MMU. 
	if (OF_getproplen(node, "mmu-type") < 0)
		return (0);

	return (1);
}

void
cpu_mp_setmaxid(void)
{
#ifdef FDT
	int cores;

	cores = ofw_cpu_early_foreach(cpu_check_mmu, true);
	if (cores > 0) {
		cores = MIN(cores, MAXCPU);
		if (bootverbose)
			printf("Found %d CPUs in the device tree\n", cores);
		mp_ncpus = cores;
		mp_maxid = cores - 1;
		cpu_enum_method = CPUS_FDT;
		return;
	}
#endif

	if (bootverbose)
		printf("No CPU data, limiting to 1 core\n");

	mp_ncpus = 1;
	mp_maxid = 0;
}

Index: head/sys/riscv/riscv/sbi.c
===================================================================
--- head/sys/riscv/riscv/sbi.c	(revision 367066)
+++ head/sys/riscv/riscv/sbi.c	(revision 367067)
@@ -1,207 +1,205 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019 Mitchell Horne
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include

/* SBI Implementation-Specific Definitions */
#define	OPENSBI_VERSION_MAJOR_OFFSET	16
#define	OPENSBI_VERSION_MINOR_MASK	0xFFFF

u_long sbi_spec_version;
u_long sbi_impl_id;
u_long sbi_impl_version;

static struct sbi_ret
sbi_get_spec_version(void)
{
	return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_SPEC_VERSION));
}

static struct sbi_ret
sbi_get_impl_id(void)
{
	return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_IMPL_ID));
}

static struct sbi_ret
sbi_get_impl_version(void)
{
	return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_IMPL_VERSION));
}

static struct sbi_ret
sbi_get_mvendorid(void)
{
	return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_MVENDORID));
}

static struct sbi_ret
sbi_get_marchid(void)
{
	return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_MARCHID));
}

static struct sbi_ret
sbi_get_mimpid(void)
{
	return (SBI_CALL0(SBI_EXT_ID_BASE, SBI_BASE_GET_MIMPID));
}

static void
sbi_shutdown_final(void *dummy __unused, int howto)
{
	if ((howto & RB_POWEROFF) != 0)
		sbi_shutdown();
}

void
sbi_print_version(void)
{
	u_int major;
	u_int minor;

	/* For legacy SBI implementations. */
	if (sbi_spec_version == 0) {
		printf("SBI: Unknown (Legacy) Implementation\n");
		printf("SBI Specification Version: 0.1\n");
		return;
	}

	switch (sbi_impl_id) {
	case (SBI_IMPL_ID_BBL):
		printf("SBI: Berkely Boot Loader %lu\n", sbi_impl_version);
		break;
	case (SBI_IMPL_ID_OPENSBI):
		major = sbi_impl_version >> OPENSBI_VERSION_MAJOR_OFFSET;
		minor = sbi_impl_version & OPENSBI_VERSION_MINOR_MASK;
		printf("SBI: OpenSBI v%u.%u\n", major, minor);
		break;
	default:
		printf("SBI: Unrecognized Implementation: %lu\n", sbi_impl_id);
		break;
	}

	major = (sbi_spec_version & SBI_SPEC_VERS_MAJOR_MASK) >>
	    SBI_SPEC_VERS_MAJOR_OFFSET;
	minor = (sbi_spec_version & SBI_SPEC_VERS_MINOR_MASK);
	printf("SBI Specification Version: %u.%u\n", major, minor);
}

int
sbi_hsm_hart_start(u_long hart, u_long start_addr, u_long priv)
{
	struct sbi_ret ret;

	ret = SBI_CALL3(SBI_EXT_ID_HSM, SBI_HSM_HART_START, hart,
	    start_addr, priv);
	return (ret.error != 0 ? (int)ret.error : 0);
}

void
sbi_hsm_hart_stop(void)
{
	(void)SBI_CALL0(SBI_EXT_ID_HSM, SBI_HSM_HART_STOP);
}

int
sbi_hsm_hart_status(u_long hart)
{
	struct sbi_ret ret;

	ret = SBI_CALL1(SBI_EXT_ID_HSM, SBI_HSM_HART_STATUS, hart);

	return (ret.error != 0 ? (int)ret.error : (int)ret.value);
}

void
sbi_init(void)
{
	struct sbi_ret sret;

	/*
	 * Get the spec version. For legacy SBI implementations this will
	 * return an error, otherwise it is guaranteed to succeed.
	 */
	sret = sbi_get_spec_version();
	if (sret.error != 0) {
		/* We are running a legacy SBI implementation. */
		sbi_spec_version = 0;
		return;
	}

	/* Set the SBI implementation info. */
	sbi_spec_version = sret.value;
	sbi_impl_id = sbi_get_impl_id().value;
	sbi_impl_version = sbi_get_impl_version().value;

	/* Set the hardware implementation info. */
	mvendorid = sbi_get_mvendorid().value;
	marchid = sbi_get_marchid().value;
	mimpid = sbi_get_mimpid().value;

	/*
	 * Probe for legacy extensions. Currently we rely on all of them
	 * to be implemented, but this is not guaranteed by the spec.
	 */
	KASSERT(sbi_probe_extension(SBI_SET_TIMER) != 0,
	    ("SBI doesn't implement sbi_set_timer()"));
	KASSERT(sbi_probe_extension(SBI_CONSOLE_PUTCHAR) != 0,
	    ("SBI doesn't implement sbi_console_putchar()"));
	KASSERT(sbi_probe_extension(SBI_CONSOLE_GETCHAR) != 0,
	    ("SBI doesn't implement sbi_console_getchar()"));
-	KASSERT(sbi_probe_extension(SBI_CLEAR_IPI) != 0,
-	    ("SBI doesn't implement sbi_clear_ipi()"));
	KASSERT(sbi_probe_extension(SBI_SEND_IPI) != 0,
	    ("SBI doesn't implement sbi_send_ipi()"));
	KASSERT(sbi_probe_extension(SBI_REMOTE_FENCE_I) != 0,
	    ("SBI doesn't implement sbi_remote_fence_i()"));
	KASSERT(sbi_probe_extension(SBI_REMOTE_SFENCE_VMA) != 0,
	    ("SBI doesn't implement sbi_remote_sfence_vma()"));
	KASSERT(sbi_probe_extension(SBI_REMOTE_SFENCE_VMA_ASID) != 0,
	    ("SBI doesn't implement sbi_remote_sfence_vma_asid()"));
	KASSERT(sbi_probe_extension(SBI_SHUTDOWN) != 0,
	    ("SBI doesn't implement sbi_shutdown()"));
}

static void
sbi_late_init(void *dummy __unused)
{
	EVENTHANDLER_REGISTER(shutdown_final, sbi_shutdown_final, NULL,
	    SHUTDOWN_PRI_LAST);
}
SYSINIT(sbi, SI_SUB_KLD, SI_ORDER_ANY, sbi_late_init, NULL);
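
For readers unfamiliar with the mechanism swapped in above: the IPI is now acknowledged by clearing the supervisor software-interrupt pending bit (sip.SSIP, bit 1 of the sip CSR per the RISC-V privileged specification) directly, instead of making a legacy SBI_CLEAR_IPI ecall into firmware. The sketch below is illustrative only; the csr_clear() definition shown is an assumption modeled on FreeBSD's machine/riscvreg.h and is not part of this commit.

/*
 * Illustrative sketch, not part of the commit. SIP_SSIP is bit 1 of the
 * sip CSR (RISC-V privileged spec); the csr_clear() macro below is an
 * assumed stand-in for the one provided by machine/riscvreg.h.
 */
#define	SIP_SSIP	(1 << 1)

#define	csr_clear(csr, val)						\
	__asm __volatile("csrc " #csr ", %0" :: "r" (val))

static void
ack_ipi(void)
{

	/*
	 * Clear the pending supervisor software interrupt in place of the
	 * old (void)SBI_CALL0(SBI_CLEAR_IPI, 0) firmware round trip.
	 */
	csr_clear(sip, SIP_SSIP);
}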