diff --git a/sys/arm64/arm64/swtch.S b/sys/arm64/arm64/swtch.S
index ca00d473fd47..6af70ca839a0 100644
--- a/sys/arm64/arm64/swtch.S
+++ b/sys/arm64/arm64/swtch.S
@@ -1,282 +1,280 @@
/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "assym.inc"
#include "opt_kstack_pages.h"
#include "opt_sched.h"

#include <sys/elf_common.h>

#include <machine/asm.h>
#include <machine/armreg.h>

.macro clear_step_flag pcbflags, tmp
	tbz	\pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #MDSCR_SS
	msr	mdscr_el1, \tmp
	isb
999:
.endm

.macro set_step_flag pcbflags, tmp
	tbz	\pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #MDSCR_SS
	msr	mdscr_el1, \tmp
	isb
999:
.endm

/*
 * void cpu_throw(struct thread *old, struct thread *new)
 */
ENTRY(cpu_throw)
	/* If old == NULL skip disabling stepping */
	cbz	x0, 1f

	/* If we were single stepping, disable it */
	ldr	x4, [x0, #TD_PCB]
	ldr	w5, [x4, #PCB_FLAGS]
	clear_step_flag w5, x6
1:

#ifdef VFP
	/* Backup the new thread pointer around a call to C code */
	mov	x19, x1
	bl	vfp_discard
	mov	x0, x19
#else
	mov	x0, x1
#endif

	/* This returns the thread pointer so no need to save it */
	bl	ptrauth_switch
#ifdef PERTHREAD_SSP
	mov	x19, x0
#endif
	/* This returns the thread pcb */
	bl	pmap_switch
	mov	x4, x0
#ifdef PERTHREAD_SSP
	/* Update the per-thread stack canary pointer. */
	add	x19, x19, #(TD_MD_CANARY)
	msr	sp_el0, x19
#endif

	/* If we are single stepping, enable it */
	ldr	w5, [x4, #PCB_FLAGS]
	set_step_flag w5, x6

	/* Restore the registers */
	ldp	x5, x6, [x4, #PCB_SP]
	mov	sp, x5
	msr	tpidr_el0, x6
	ldr	x6, [x4, #PCB_TPIDRRO]
	msr	tpidrro_el0, x6
	ldp	x19, x20, [x4, #PCB_REGS + (PCB_X19 + 0) * 8]
	ldp	x21, x22, [x4, #PCB_REGS + (PCB_X19 + 2) * 8]
	ldp	x23, x24, [x4, #PCB_REGS + (PCB_X19 + 4) * 8]
	ldp	x25, x26, [x4, #PCB_REGS + (PCB_X19 + 6) * 8]
	ldp	x27, x28, [x4, #PCB_REGS + (PCB_X19 + 8) * 8]
	ldp	x29, lr, [x4, #PCB_REGS + (PCB_X19 + 10) * 8]

	ret
END(cpu_throw)

/*
 * void cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
 *
 * x0 = old
 * x1 = new
 * x2 = mtx
 * x3 to x7, x16 and x17 are caller saved
 */
ENTRY(cpu_switch)
	/*
	 * Save the old context.
	 */
	ldr	x4, [x0, #TD_PCB]

	/* Store the callee-saved registers */
	stp	x19, x20, [x4, #PCB_REGS + (PCB_X19 + 0) * 8]
	stp	x21, x22, [x4, #PCB_REGS + (PCB_X19 + 2) * 8]
	stp	x23, x24, [x4, #PCB_REGS + (PCB_X19 + 4) * 8]
	stp	x25, x26, [x4, #PCB_REGS + (PCB_X19 + 6) * 8]
	stp	x27, x28, [x4, #PCB_REGS + (PCB_X19 + 8) * 8]
	stp	x29, lr, [x4, #PCB_REGS + (PCB_X19 + 10) * 8]
	/* And the old stack pointer */
	mov	x5, sp
	mrs	x6, tpidrro_el0
	str	x6, [x4, #PCB_TPIDRRO]
	mrs	x6, tpidr_el0
	stp	x5, x6, [x4, #PCB_SP]

	/* If we were single stepping, disable it */
	ldr	w5, [x4, #PCB_FLAGS]
	clear_step_flag w5, x6

	mov	x19, x0
	mov	x20, x1
	mov	x21, x2

#ifdef VFP
-	/* Load the pcb address */
-	mov	x1, x4
-	bl	vfp_save_state
+	bl	vfp_save_state_switch
	mov	x0, x20
#else
	mov	x0, x1
#endif

	/* This returns the thread pointer so no need to save it */
	bl	ptrauth_switch
	/* This returns the thread pcb */
	bl	pmap_switch
	/* Move the new pcb out of the way */
	mov	x4, x0

	mov	x2, x21
	mov	x1, x20
	mov	x0, x19
#ifdef PERTHREAD_SSP
	/* Update the per-thread stack canary pointer. */
	add	x20, x20, #(TD_MD_CANARY)
	msr	sp_el0, x20
#endif

	/*
	 * Release the old thread.
	 */
	stlr	x2, [x0, #TD_LOCK]
#if defined(SCHED_ULE) && defined(SMP)
	/* Spin if TD_LOCK points to a blocked_lock */
	ldr	x2, =_C_LABEL(blocked_lock)
1:
	ldar	x3, [x1, #TD_LOCK]
	cmp	x3, x2
	b.eq	1b
#endif

	/* If we are single stepping, enable it */
	ldr	w5, [x4, #PCB_FLAGS]
	set_step_flag w5, x6

	/* Restore the registers */
	ldp	x5, x6, [x4, #PCB_SP]
	mov	sp, x5
	msr	tpidr_el0, x6
	ldr	x6, [x4, #PCB_TPIDRRO]
	msr	tpidrro_el0, x6
	ldp	x19, x20, [x4, #PCB_REGS + (PCB_X19 + 0) * 8]
	ldp	x21, x22, [x4, #PCB_REGS + (PCB_X19 + 2) * 8]
	ldp	x23, x24, [x4, #PCB_REGS + (PCB_X19 + 4) * 8]
	ldp	x25, x26, [x4, #PCB_REGS + (PCB_X19 + 6) * 8]
	ldp	x27, x28, [x4, #PCB_REGS + (PCB_X19 + 8) * 8]
	ldp	x29, lr, [x4, #PCB_REGS + (PCB_X19 + 10) * 8]

	ret
END(cpu_switch)

ENTRY(fork_trampoline)
	mov	x0, x19
	mov	x1, x20
	mov	x2, sp
	mov	fp, #0	/* Stack traceback stops here. */
	bl	_C_LABEL(fork_exit)

	/*
	 * Disable interrupts as we are setting userspace specific
	 * state that we won't handle correctly in an interrupt while
	 * in the kernel.
	 */
	msr	daifset, #(DAIF_D | DAIF_INTR)

	ldr	x0, [x18, #PC_CURTHREAD]
	bl	ptrauth_enter_el0

	/* Restore sp, lr, elr, and spsr */
	ldp	x18, lr, [sp, #TF_SP]
	ldp	x10, x11, [sp, #TF_ELR]
	msr	sp_el0, x18
	msr	spsr_el1, x11
	msr	elr_el1, x10

	/* Restore the CPU registers */
	ldp	x0, x1, [sp, #TF_X + 0 * 8]
	ldp	x2, x3, [sp, #TF_X + 2 * 8]
	ldp	x4, x5, [sp, #TF_X + 4 * 8]
	ldp	x6, x7, [sp, #TF_X + 6 * 8]
	ldp	x8, x9, [sp, #TF_X + 8 * 8]
	ldp	x10, x11, [sp, #TF_X + 10 * 8]
	ldp	x12, x13, [sp, #TF_X + 12 * 8]
	ldp	x14, x15, [sp, #TF_X + 14 * 8]
	ldp	x16, x17, [sp, #TF_X + 16 * 8]
	ldp	x18, x19, [sp, #TF_X + 18 * 8]
	ldp	x20, x21, [sp, #TF_X + 20 * 8]
	ldp	x22, x23, [sp, #TF_X + 22 * 8]
	ldp	x24, x25, [sp, #TF_X + 24 * 8]
	ldp	x26, x27, [sp, #TF_X + 26 * 8]
	ldp	x28, x29, [sp, #TF_X + 28 * 8]

	/*
	 * No need to re-enable interrupts since the PSR
	 * will be set to the desired value anyway.
	 */
	ERET
END(fork_trampoline)

ENTRY(savectx)
	/* Store the callee-saved registers */
	stp	x19, x20, [x0, #PCB_REGS + (PCB_X19 + 0) * 8]
	stp	x21, x22, [x0, #PCB_REGS + (PCB_X19 + 2) * 8]
	stp	x23, x24, [x0, #PCB_REGS + (PCB_X19 + 4) * 8]
	stp	x25, x26, [x0, #PCB_REGS + (PCB_X19 + 6) * 8]
	stp	x27, x28, [x0, #PCB_REGS + (PCB_X19 + 8) * 8]
	stp	x29, lr, [x0, #PCB_REGS + (PCB_X19 + 10) * 8]
	/* And the old stack pointer */
	mov	x5, sp
	mrs	x6, tpidrro_el0
	str	x6, [x0, #PCB_TPIDRRO]
	mrs	x6, tpidr_el0
	stp	x5, x6, [x0, #PCB_SP]

	/* Store the VFP registers */
#ifdef VFP
	mov	x28, lr
	bl	vfp_save_state_savectx
	mov	lr, x28
#endif

	ret
END(savectx)

GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
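
The cpu_switch() hunk above is the heart of this patch: the old code had to load the pcb pointer into x1 and call the two-argument vfp_save_state(), while the new code calls vfp_save_state_switch() with the old thread still in x0 and lets the C side look up the pcb. A minimal sketch of the before/after contract, using only the declarations visible in this patch:

	/* Before: the assembly computed and passed the pcb itself. */
	void vfp_save_state(struct thread *td, struct pcb *pcb);

	/*
	 * After: cpu_switch passes only the old thread; the new helper in
	 * vfp.c (below) resolves td->td_pcb before the common save path.
	 */
	void vfp_save_state_switch(struct thread *td);
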
diff --git a/sys/arm64/arm64/vfp.c b/sys/arm64/arm64/vfp.c
index f35cd960702b..c65108a83399 100644
--- a/sys/arm64/arm64/vfp.c
+++ b/sys/arm64/arm64/vfp.c
@@ -1,499 +1,507 @@
/*-
 * Copyright (c) 2015-2016 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#ifdef VFP
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/proc.h>

#include <vm/uma.h>

#include <machine/armreg.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/vfp.h>

/* Sanity check we can store all the VFP registers */
CTASSERT(sizeof(((struct pcb *)0)->pcb_fpustate.vfp_regs) == 16 * 32);

static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
    "Kernel contexts for VFP state");

struct fpu_kern_ctx {
	struct vfpstate	*prev;
#define	FPU_KERN_CTX_DUMMY	0x01	/* avoided save for the kern thread */
#define	FPU_KERN_CTX_INUSE	0x02
	uint32_t	 flags;
	struct vfpstate	 state;
};

static uma_zone_t fpu_save_area_zone;
static struct vfpstate *fpu_initialstate;

void
vfp_enable(void)
{
	uint32_t cpacr;

	cpacr = READ_SPECIALREG(cpacr_el1);
	cpacr = (cpacr & ~CPACR_FPEN_MASK) | CPACR_FPEN_TRAP_NONE;
	WRITE_SPECIALREG(cpacr_el1, cpacr);
	isb();
}

void
vfp_disable(void)
{
	uint32_t cpacr;

	cpacr = READ_SPECIALREG(cpacr_el1);
	cpacr = (cpacr & ~CPACR_FPEN_MASK) | CPACR_FPEN_TRAP_ALL1;
	WRITE_SPECIALREG(cpacr_el1, cpacr);
	isb();
}

/*
 * Called when the thread is dying or when discarding the kernel VFP state.
 * If the thread was the last to use the VFP unit, mark it as unused to tell
 * the kernel the fp state is unowned. Ensure the VFP unit is off so we get
 * an exception on the next access.
 */
void
vfp_discard(struct thread *td)
{

#ifdef INVARIANTS
	if (td != NULL)
		CRITICAL_ASSERT(td);
#endif

	if (PCPU_GET(fpcurthread) == td)
		PCPU_SET(fpcurthread, NULL);

	vfp_disable();
}

void
vfp_store(struct vfpstate *state)
{
	__uint128_t *vfp_state;
	uint64_t fpcr, fpsr;

	vfp_state = state->vfp_regs;
	__asm __volatile(
	    ".arch_extension fp\n"
	    "mrs	%0, fpcr	\n"
	    "mrs	%1, fpsr	\n"
	    "stp	q0, q1, [%2, #16 * 0]\n"
	    "stp	q2, q3, [%2, #16 * 2]\n"
	    "stp	q4, q5, [%2, #16 * 4]\n"
	    "stp	q6, q7, [%2, #16 * 6]\n"
	    "stp	q8, q9, [%2, #16 * 8]\n"
	    "stp	q10, q11, [%2, #16 * 10]\n"
	    "stp	q12, q13, [%2, #16 * 12]\n"
	    "stp	q14, q15, [%2, #16 * 14]\n"
	    "stp	q16, q17, [%2, #16 * 16]\n"
	    "stp	q18, q19, [%2, #16 * 18]\n"
	    "stp	q20, q21, [%2, #16 * 20]\n"
	    "stp	q22, q23, [%2, #16 * 22]\n"
	    "stp	q24, q25, [%2, #16 * 24]\n"
	    "stp	q26, q27, [%2, #16 * 26]\n"
	    "stp	q28, q29, [%2, #16 * 28]\n"
	    "stp	q30, q31, [%2, #16 * 30]\n"
	    ".arch_extension nofp\n"
	    : "=&r"(fpcr), "=&r"(fpsr) : "r"(vfp_state));

	state->vfp_fpcr = fpcr;
	state->vfp_fpsr = fpsr;
}

void
vfp_restore(struct vfpstate *state)
{
	__uint128_t *vfp_state;
	uint64_t fpcr, fpsr;

	vfp_state = state->vfp_regs;
	fpcr = state->vfp_fpcr;
	fpsr = state->vfp_fpsr;

	__asm __volatile(
	    ".arch_extension fp\n"
	    "ldp	q0, q1, [%2, #16 * 0]\n"
	    "ldp	q2, q3, [%2, #16 * 2]\n"
	    "ldp	q4, q5, [%2, #16 * 4]\n"
	    "ldp	q6, q7, [%2, #16 * 6]\n"
	    "ldp	q8, q9, [%2, #16 * 8]\n"
	    "ldp	q10, q11, [%2, #16 * 10]\n"
	    "ldp	q12, q13, [%2, #16 * 12]\n"
	    "ldp	q14, q15, [%2, #16 * 14]\n"
	    "ldp	q16, q17, [%2, #16 * 16]\n"
	    "ldp	q18, q19, [%2, #16 * 18]\n"
	    "ldp	q20, q21, [%2, #16 * 20]\n"
	    "ldp	q22, q23, [%2, #16 * 22]\n"
	    "ldp	q24, q25, [%2, #16 * 24]\n"
	    "ldp	q26, q27, [%2, #16 * 26]\n"
	    "ldp	q28, q29, [%2, #16 * 28]\n"
	    "ldp	q30, q31, [%2, #16 * 30]\n"
	    "msr	fpcr, %0	\n"
	    "msr	fpsr, %1	\n"
	    ".arch_extension nofp\n"
	    : : "r"(fpcr), "r"(fpsr), "r"(vfp_state));
}

static void
vfp_save_state_common(struct thread *td, struct pcb *pcb)
{
	uint32_t cpacr;

	critical_enter();
	/*
	 * Only store the registers if the VFP is enabled,
	 * i.e. return if we are trapping on FP access.
	 */
	cpacr = READ_SPECIALREG(cpacr_el1);
	if ((cpacr & CPACR_FPEN_MASK) == CPACR_FPEN_TRAP_NONE) {
		KASSERT(PCPU_GET(fpcurthread) == td,
		    ("Storing an invalid VFP state"));

		vfp_store(pcb->pcb_fpusaved);
		dsb(ish);
		vfp_disable();
	}
	critical_exit();
}

void
vfp_save_state(struct thread *td, struct pcb *pcb)
{
	KASSERT(td != NULL, ("NULL vfp thread"));
	KASSERT(pcb != NULL, ("NULL vfp pcb"));
	KASSERT(td->td_pcb == pcb, ("Invalid vfp pcb"));

	vfp_save_state_common(td, pcb);
}

void
vfp_save_state_savectx(struct pcb *pcb)
{
	/*
	 * savectx() will be called on panic with dumppcb as an argument.
	 * dumppcb doesn't have pcb_fpusaved set, so set it to save
	 * the VFP registers.
	 */
	MPASS(pcb->pcb_fpusaved == NULL);
	pcb->pcb_fpusaved = &pcb->pcb_fpustate;

	vfp_save_state_common(curthread, pcb);
}

+void
+vfp_save_state_switch(struct thread *td)
+{
+	KASSERT(td != NULL, ("NULL vfp thread"));
+
+	vfp_save_state_common(td, td->td_pcb);
+}
+
/*
 * Update the VFP state for a forked process or new thread. The PCB will
 * have been copied from the old thread.
 */
void
vfp_new_thread(struct thread *newtd, struct thread *oldtd, bool fork)
{
	struct pcb *newpcb;

	newpcb = newtd->td_pcb;

	/* Kernel threads start with clean VFP */
	if ((oldtd->td_pflags & TDP_KTHREAD) != 0) {
		newpcb->pcb_fpflags &=
		    ~(PCB_FP_STARTED | PCB_FP_KERN | PCB_FP_NOSAVE);
	} else {
		MPASS((newpcb->pcb_fpflags & (PCB_FP_KERN|PCB_FP_NOSAVE)) == 0);
		if (!fork) {
			newpcb->pcb_fpflags &= ~PCB_FP_STARTED;
		}
	}

	newpcb->pcb_fpusaved = &newpcb->pcb_fpustate;
	newpcb->pcb_vfpcpu = UINT_MAX;
}

/*
 * Reset the FP state to avoid leaking state from the parent process across
 * execve() (and to ensure that we get a consistent floating point environment
 * in every new process).
 */
void
vfp_reset_state(struct thread *td, struct pcb *pcb)
{
	/* Discard the thread's VFP state before resetting it */
	critical_enter();
	vfp_discard(td);
	critical_exit();

	/*
	 * Clear the thread state. The VFP is disabled and is not the current
	 * VFP thread so we won't change any of these on context switch.
	 */
	bzero(&pcb->pcb_fpustate.vfp_regs,
	    sizeof(pcb->pcb_fpustate.vfp_regs));
	KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
	    ("pcb_fpusaved should point to pcb_fpustate."));
	pcb->pcb_fpustate.vfp_fpcr = VFPCR_INIT;
	pcb->pcb_fpustate.vfp_fpsr = 0;
	pcb->pcb_vfpcpu = UINT_MAX;
	pcb->pcb_fpflags = 0;
}

void
vfp_restore_state(void)
{
	struct pcb *curpcb;
	u_int cpu;

	critical_enter();

	cpu = PCPU_GET(cpuid);
	curpcb = curthread->td_pcb;
	curpcb->pcb_fpflags |= PCB_FP_STARTED;

	vfp_enable();

	/*
	 * If the previous thread on this cpu to use the VFP was not the
	 * current thread, or the current thread last used it on a different
	 * cpu we need to restore the old state.
	 */
	if (PCPU_GET(fpcurthread) != curthread ||
	    cpu != curpcb->pcb_vfpcpu) {
		vfp_restore(curthread->td_pcb->pcb_fpusaved);
		PCPU_SET(fpcurthread, curthread);
		curpcb->pcb_vfpcpu = cpu;
	}

	critical_exit();
}

void
vfp_init_secondary(void)
{
	uint64_t pfr;

	/* Check if there is a vfp unit present */
	pfr = READ_SPECIALREG(id_aa64pfr0_el1);
	if ((pfr & ID_AA64PFR0_FP_MASK) == ID_AA64PFR0_FP_NONE)
		return;

	/* Disable to be enabled when it's used */
	vfp_disable();
}

static void
vfp_init(const void *dummy __unused)
{
	uint64_t pfr;

	/* Check if there is a vfp unit present */
	pfr = READ_SPECIALREG(id_aa64pfr0_el1);
	if ((pfr & ID_AA64PFR0_FP_MASK) == ID_AA64PFR0_FP_NONE)
		return;

	fpu_save_area_zone = uma_zcreate("VFP_save_area",
	    sizeof(struct vfpstate), NULL, NULL, NULL, NULL,
	    _Alignof(struct vfpstate) - 1, 0);
	fpu_initialstate = uma_zalloc(fpu_save_area_zone,
	    M_WAITOK | M_ZERO);

	/* Ensure the VFP is enabled before accessing it in vfp_store */
	vfp_enable();
	vfp_store(fpu_initialstate);

	/* Disable to be enabled when it's used */
	vfp_disable();

	/* Zero the VFP registers but keep fpcr and fpsr */
	bzero(fpu_initialstate->vfp_regs,
	    sizeof(fpu_initialstate->vfp_regs));

	thread0.td_pcb->pcb_fpusaved->vfp_fpcr = VFPCR_INIT;
}

SYSINIT(vfp, SI_SUB_CPU, SI_ORDER_ANY, vfp_init, NULL);

struct fpu_kern_ctx *
fpu_kern_alloc_ctx(u_int flags)
{
	struct fpu_kern_ctx *res;
	size_t sz;

	sz = sizeof(struct fpu_kern_ctx);
	res = malloc(sz, M_FPUKERN_CTX, ((flags & FPU_KERN_NOWAIT) ?
	    M_NOWAIT : M_WAITOK) | M_ZERO);
	return (res);
}

void
fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)
{
	KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) == 0, ("free'ing inuse ctx"));
	/* XXXAndrew clear the memory ? */
	free(ctx, M_FPUKERN_CTX);
}

void
fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	KASSERT((flags & FPU_KERN_NOCTX) != 0 || ctx != NULL,
	    ("ctx is required when !FPU_KERN_NOCTX"));
	KASSERT(ctx == NULL || (ctx->flags & FPU_KERN_CTX_INUSE) == 0,
	    ("using inuse ctx"));
	KASSERT((pcb->pcb_fpflags & PCB_FP_NOSAVE) == 0,
	    ("recursive fpu_kern_enter while in PCB_FP_NOSAVE state"));

	if ((flags & FPU_KERN_NOCTX) != 0) {
		critical_enter();
		if (curthread == PCPU_GET(fpcurthread)) {
			vfp_save_state(curthread, pcb);
		}
		PCPU_SET(fpcurthread, NULL);

		vfp_enable();
		pcb->pcb_fpflags |= PCB_FP_KERN | PCB_FP_NOSAVE |
		    PCB_FP_STARTED;
		return;
	}

	if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
		ctx->flags = FPU_KERN_CTX_DUMMY | FPU_KERN_CTX_INUSE;
		return;
	}
	/*
	 * Check that either we are already using the VFP in the kernel, or
	 * the saved state points to the default user space.
	 */
	KASSERT((pcb->pcb_fpflags & PCB_FP_KERN) != 0 ||
	    pcb->pcb_fpusaved == &pcb->pcb_fpustate,
	    ("Mangled pcb_fpusaved %x %p %p", pcb->pcb_fpflags,
	     pcb->pcb_fpusaved, &pcb->pcb_fpustate));
	ctx->flags = FPU_KERN_CTX_INUSE;
	vfp_save_state(curthread, pcb);
	ctx->prev = pcb->pcb_fpusaved;
	pcb->pcb_fpusaved = &ctx->state;
	pcb->pcb_fpflags |= PCB_FP_KERN;
	pcb->pcb_fpflags &= ~PCB_FP_STARTED;

	return;
}

int
fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
{
	struct pcb *pcb;

	pcb = td->td_pcb;

	if ((pcb->pcb_fpflags & PCB_FP_NOSAVE) != 0) {
		KASSERT(ctx == NULL, ("non-null ctx after FPU_KERN_NOCTX"));
		KASSERT(PCPU_GET(fpcurthread) == NULL,
		    ("non-NULL fpcurthread for PCB_FP_NOSAVE"));
		CRITICAL_ASSERT(td);

		vfp_disable();
		pcb->pcb_fpflags &= ~(PCB_FP_NOSAVE | PCB_FP_STARTED);
		critical_exit();
	} else {
		KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) != 0,
		    ("FPU context not inuse"));
		ctx->flags &= ~FPU_KERN_CTX_INUSE;

		if (is_fpu_kern_thread(0) &&
		    (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
			return (0);
		KASSERT((ctx->flags & FPU_KERN_CTX_DUMMY) == 0, ("dummy ctx"));
		critical_enter();
		vfp_discard(td);
		critical_exit();
		pcb->pcb_fpflags &= ~PCB_FP_STARTED;
		pcb->pcb_fpusaved = ctx->prev;
	}

	if (pcb->pcb_fpusaved == &pcb->pcb_fpustate) {
		pcb->pcb_fpflags &= ~PCB_FP_KERN;
	} else {
		KASSERT((pcb->pcb_fpflags & PCB_FP_KERN) != 0,
		    ("unpaired fpu_kern_leave"));
	}

	return (0);
}

int
fpu_kern_thread(u_int flags __unused)
{
	struct pcb *pcb = curthread->td_pcb;

	KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
	    ("Only kthread may use fpu_kern_thread"));
	KASSERT(pcb->pcb_fpusaved == &pcb->pcb_fpustate,
	    ("Mangled pcb_fpusaved"));
	KASSERT((pcb->pcb_fpflags & PCB_FP_KERN) == 0,
	    ("Thread already setup for the VFP"));
	pcb->pcb_fpflags |= PCB_FP_KERN;
	return (0);
}

int
is_fpu_kern_thread(u_int flags __unused)
{
	struct pcb *curpcb;

	if ((curthread->td_pflags & TDP_KTHREAD) == 0)
		return (0);
	curpcb = curthread->td_pcb;
	return ((curpcb->pcb_fpflags & PCB_FP_KERN) != 0);
}

/*
 * FPU save area alloc/free/init utility routines
 */
struct vfpstate *
fpu_save_area_alloc(void)
{
	return (uma_zalloc(fpu_save_area_zone, M_WAITOK));
}

void
fpu_save_area_free(struct vfpstate *fsa)
{
	uma_zfree(fpu_save_area_zone, fsa);
}

void
fpu_save_area_reset(struct vfpstate *fsa)
{
	memcpy(fsa, fpu_initialstate, sizeof(*fsa));
}
#endif
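
vfp.c above implements, and vfp.h below declares, the fpu_kern_* API that lets kernel code borrow the FP/SIMD unit. A minimal usage sketch, assuming a hypothetical caller (do_simd_work() and its body are illustrative, not part of this patch):

	static void
	do_simd_work(void)
	{
		struct fpu_kern_ctx *ctx;

		/* Allocate a context so existing FP state survives kernel use. */
		ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL);
		fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL);
		/* FP/SIMD instructions may be executed here. */
		fpu_kern_leave(curthread, ctx);
		fpu_kern_free_ctx(ctx);
	}
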
diff --git a/sys/arm64/include/vfp.h b/sys/arm64/include/vfp.h
index 7f4c86e7737d..47d068d6050c 100644
--- a/sys/arm64/include/vfp.h
+++ b/sys/arm64/include/vfp.h
@@ -1,120 +1,121 @@
/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef __arm__
#include <arm/vfp.h>
#else /* !__arm__ */

#ifndef _MACHINE_VFP_H_
#define	_MACHINE_VFP_H_

/* VFPCR */
#define	VFPCR_AHP		(0x04000000)	/* alt. half-precision */
#define	VFPCR_DN		(0x02000000)	/* default NaN enable */
#define	VFPCR_FZ		(0x01000000)	/* flush to zero enabled */
#define	VFPCR_INIT		0		/* Default fpcr after exec */

#define	VFPCR_RMODE_OFF		22		/* rounding mode offset */
#define	VFPCR_RMODE_MASK	(0x00c00000)	/* rounding mode mask */
#define	VFPCR_RMODE_RN		(0x00000000)	/* round nearest */
#define	VFPCR_RMODE_RPI		(0x00400000)	/* round to plus infinity */
#define	VFPCR_RMODE_RNI		(0x00800000)	/* round to neg infinity */
#define	VFPCR_RMODE_RM		(0x00c00000)	/* round to zero */

#define	VFPCR_STRIDE_OFF	20		/* vector stride -1 */
#define	VFPCR_STRIDE_MASK	(0x00300000)
#define	VFPCR_LEN_OFF		16		/* vector length -1 */
#define	VFPCR_LEN_MASK		(0x00070000)
#define	VFPCR_IDE		(0x00008000)	/* input subnormal exc enable */
#define	VFPCR_IXE		(0x00001000)	/* inexact exception enable */
#define	VFPCR_UFE		(0x00000800)	/* underflow exception enable */
#define	VFPCR_OFE		(0x00000400)	/* overflow exception enable */
#define	VFPCR_DZE		(0x00000200)	/* div by zero exception en */
#define	VFPCR_IOE		(0x00000100)	/* invalid op exec enable */

#ifndef LOCORE
struct vfpstate {
	__uint128_t	vfp_regs[32];
	uint32_t	vfp_fpcr;
	uint32_t	vfp_fpsr;
};

#ifdef _KERNEL
struct pcb;
struct thread;

void	vfp_init_secondary(void);
void	vfp_enable(void);
void	vfp_disable(void);
void	vfp_discard(struct thread *);
void	vfp_store(struct vfpstate *);
void	vfp_restore(struct vfpstate *);
void	vfp_new_thread(struct thread *, struct thread *, bool);
void	vfp_reset_state(struct thread *, struct pcb *);
void	vfp_restore_state(void);
void	vfp_save_state(struct thread *, struct pcb *);
void	vfp_save_state_savectx(struct pcb *);
+void	vfp_save_state_switch(struct thread *);

struct fpu_kern_ctx;

/*
 * Flags for fpu_kern_alloc_ctx(), fpu_kern_enter() and fpu_kern_thread().
 */
#define	FPU_KERN_NORMAL	0x0000
#define	FPU_KERN_NOWAIT	0x0001
#define	FPU_KERN_KTHR	0x0002
#define	FPU_KERN_NOCTX	0x0004

struct fpu_kern_ctx *fpu_kern_alloc_ctx(u_int);
void fpu_kern_free_ctx(struct fpu_kern_ctx *);
void fpu_kern_enter(struct thread *, struct fpu_kern_ctx *, u_int);
int fpu_kern_leave(struct thread *, struct fpu_kern_ctx *);
int fpu_kern_thread(u_int);
int is_fpu_kern_thread(u_int);

struct vfpstate *fpu_save_area_alloc(void);
void fpu_save_area_free(struct vfpstate *fsa);
void fpu_save_area_reset(struct vfpstate *fsa);

/* Convert between the AArch32 FPSCR and the AArch64 FPCR/FPSR */
#define	VFP_FPSCR_FROM_SRCR(vpsr, vpcr)	((vpsr) | ((vpcr) & 0x7c00000))
#define	VFP_FPSR_FROM_FPSCR(vpscr)	((vpscr) &~ 0x7c00000)
#define	VFP_FPCR_FROM_FPSCR(vpsrc)	((vpsrc) & 0x7c00000)

#ifdef COMPAT_FREEBSD32
void get_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp);
void set_fpcontext32(struct thread *td, mcontext32_vfp_t *mcp);
#endif

#endif
#endif

#endif /* !_MACHINE_VFP_H_ */
#endif /* !__arm__ */
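
A worked example of the FPSCR conversion macros at the end of vfp.h: AArch32 keeps control and status bits in the single FPSCR register, while AArch64 splits them into FPCR and FPSR, with mask 0x7c00000 covering the control bits (AHP, DN, FZ and the rounding mode). A hypothetical helper (the function name is illustrative only, not part of this patch):

	static uint32_t
	vfp_fpscr32_from_state(const struct vfpstate *vs)
	{
		/* Merge the saved FPSR status bits with the FPCR control bits. */
		return (VFP_FPSCR_FROM_SRCR(vs->vfp_fpsr, vs->vfp_fpcr));
	}
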