Index: head/sys/arm64/arm64/machdep.c
===================================================================
--- head/sys/arm64/arm64/machdep.c	(revision 286224)
+++ head/sys/arm64/arm64/machdep.c	(revision 286225)
@@ -1,865 +1,865 @@
/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_platform.h"

#include
__FBSDID("$FreeBSD$");

/* (The header names in the following include block were lost in extraction.) */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef VFP
#include
#endif

#ifdef FDT
#include
#include
#endif

struct pcpu __pcpu[MAXCPU];
static struct trapframe proc0_tf;

vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];

int early_boot = 1;
int cold = 1;
long realmem = 0;
long Maxmem = 0;

#define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
vm_paddr_t physmap[PHYSMAP_SIZE];
u_int physmap_idx;

struct kva_md_info kmi;

int64_t dcache_line_size;   /* The minimum D cache line size */
int64_t icache_line_size;   /* The minimum I cache line size */
int64_t idcache_line_size;  /* The minimum cache line size */

static void
cpu_startup(void *dummy)
{

    identify_cpu();

    vm_ksubmap_init(&kmi);
    bufinit();
    vm_pager_bufferinit();
}

SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

int
cpu_idle_wakeup(int cpu)
{

    return (0);
}

void
bzero(void *buf, size_t len)
{
    uint8_t *p;

    p = buf;
    while (len-- > 0)
        *p++ = 0;
}

int
fill_regs(struct thread *td, struct reg *regs)
{
    struct trapframe *frame;

    frame = td->td_frame;
    regs->sp = frame->tf_sp;
    regs->lr = frame->tf_lr;
    regs->elr = frame->tf_elr;
    regs->spsr = frame->tf_spsr;

    memcpy(regs->x, frame->tf_x, sizeof(regs->x));

    return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
    struct trapframe *frame;

    frame = td->td_frame;
    frame->tf_sp = regs->sp;
    frame->tf_lr = regs->lr;
    frame->tf_elr = regs->elr;
    frame->tf_spsr = regs->spsr;

    memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));

    return (0);
}

int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
    struct pcb *pcb;

    pcb = td->td_pcb;
    if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
        /*
         * If we have just been running VFP instructions we will
         * need to save the state to memcpy it below.
         */
-        vfp_save_state(td);
+        vfp_save_state(td, pcb);

        memcpy(regs->fp_q, pcb->pcb_vfp, sizeof(regs->fp_q));
        regs->fp_cr = pcb->pcb_fpcr;
        regs->fp_sr = pcb->pcb_fpsr;
    } else
#endif
        memset(regs->fp_q, 0, sizeof(regs->fp_q));
    return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
    struct pcb *pcb;

    pcb = td->td_pcb;
    memcpy(pcb->pcb_vfp, regs->fp_q, sizeof(regs->fp_q));
    pcb->pcb_fpcr = regs->fp_cr;
    pcb->pcb_fpsr = regs->fp_sr;
#endif
    return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *regs)
{

    panic("ARM64TODO: fill_dbregs");
}

int
set_dbregs(struct thread *td, struct dbreg *regs)
{

    panic("ARM64TODO: set_dbregs");
}

int
ptrace_set_pc(struct thread *td, u_long addr)
{

    panic("ARM64TODO: ptrace_set_pc");
    return (0);
}

int
ptrace_single_step(struct thread *td)
{

    /* TODO */
    return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{

    /* TODO */
    return (0);
}

void
exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
{
    struct trapframe *tf = td->td_frame;

    memset(tf, 0, sizeof(struct trapframe));

    tf->tf_sp = stack;
    tf->tf_lr = imgp->entry_addr;
    tf->tf_elr = imgp->entry_addr;
}

/* Sanity check these are the same size, they will be memcpy'd to and fro */
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct gpregs *)0)->gp_x);
CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
    sizeof((struct reg *)0)->x);

int
get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
{
    struct trapframe *tf = td->td_frame;

    if (clear_ret & GET_MC_CLEAR_RET) {
        mcp->mc_gpregs.gp_x[0] = 0;
        mcp->mc_gpregs.gp_spsr = tf->tf_spsr & ~PSR_C;
    } else {
        mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
        mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
    }

    memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
        sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));

    mcp->mc_gpregs.gp_sp = tf->tf_sp;
    mcp->mc_gpregs.gp_lr = tf->tf_lr;
    mcp->mc_gpregs.gp_elr = tf->tf_elr;

    return (0);
}

int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
    struct trapframe *tf = td->td_frame;

    memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));

    tf->tf_sp = mcp->mc_gpregs.gp_sp;
    tf->tf_lr = mcp->mc_gpregs.gp_lr;
    tf->tf_elr = mcp->mc_gpregs.gp_elr;
    tf->tf_spsr = mcp->mc_gpregs.gp_spsr;

    return (0);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
    struct pcb *curpcb;

    critical_enter();

    curpcb = curthread->td_pcb;

    if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
        /*
         * If we have just been running VFP instructions we will
         * need to save the state to memcpy it below.
         */
-        vfp_save_state(td);
+        vfp_save_state(td, curpcb);

        memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_vfp,
            sizeof(mcp->mc_fpregs));
        mcp->mc_fpregs.fp_cr = curpcb->pcb_fpcr;
        mcp->mc_fpregs.fp_sr = curpcb->pcb_fpsr;
        mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
        mcp->mc_flags |= _MC_FP_VALID;
    }

    critical_exit();
#endif
}
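As fill_fpregs() and get_fpcontext() above show, callers now name the pcb explicitly instead of letting vfp_save_state() derive it from the thread. A minimal sketch of the resulting caller pattern, not part of the patch; the helper name is hypothetical:

/*
 * Hypothetical helper mirroring the call sites above: save the FP
 * state for a thread only if it has started using the VFP unit.
 * td and pcb must agree, per the KASSERTs added to vfp_save_state().
 */
static void
save_thread_fp_state(struct thread *td)
{
    struct pcb *pcb;

    pcb = td->td_pcb;
    if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0)
        vfp_save_state(td, pcb);
}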
static void
set_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifdef VFP
    struct pcb *curpcb;

    critical_enter();

    if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
        curpcb = curthread->td_pcb;

        /*
         * Discard any vfp state for the current thread, we
         * are about to override it.
         */
        vfp_discard(td);

        memcpy(curpcb->pcb_vfp, mcp->mc_fpregs.fp_q,
            sizeof(mcp->mc_fpregs));
        curpcb->pcb_fpcr = mcp->mc_fpregs.fp_cr;
        curpcb->pcb_fpsr = mcp->mc_fpregs.fp_sr;
        curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags;
    }

    critical_exit();
#endif
}

void
cpu_idle(int busy)
{

    spinlock_enter();
    if (!busy)
        cpu_idleclock();
    if (!sched_runnable())
        __asm __volatile(
            "dsb sy \n"
            "wfi    \n");
    if (!busy)
        cpu_activeclock();
    spinlock_exit();
}

void
cpu_halt(void)
{

    /* We should have shutdown by now, if not enter a low power sleep */
    intr_disable();
    while (1) {
        __asm __volatile("wfi");
    }
}

/*
 * Flush the D-cache for non-DMA I/O so that the I-cache can
 * be made coherent later.
 */
void
cpu_flush_dcache(void *ptr, size_t len)
{

    /* ARM64TODO TBD */
}

/* Get current clock frequency for the given CPU ID. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{

    panic("ARM64TODO: cpu_est_clockrate");
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

    pcpu->pc_acpi_id = 0xffffffff;
}

void
spinlock_enter(void)
{
    struct thread *td;
    register_t daif;

    td = curthread;
    if (td->td_md.md_spinlock_count == 0) {
        daif = intr_disable();
        td->td_md.md_spinlock_count = 1;
        td->td_md.md_saved_daif = daif;
    } else
        td->td_md.md_spinlock_count++;
    critical_enter();
}

void
spinlock_exit(void)
{
    struct thread *td;
    register_t daif;

    td = curthread;
    critical_exit();
    daif = td->td_md.md_saved_daif;
    td->td_md.md_spinlock_count--;
    if (td->td_md.md_spinlock_count == 0)
        intr_restore(daif);
}

#ifndef _SYS_SYSPROTO_H_
struct sigreturn_args {
    ucontext_t *ucp;
};
#endif

int
sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
{
    ucontext_t uc;
    uint32_t spsr;

    if (uap == NULL)
        return (EFAULT);
    if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
        return (EFAULT);

    spsr = uc.uc_mcontext.mc_gpregs.gp_spsr;
    if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
        (spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
        return (EINVAL);

    set_mcontext(td, &uc.uc_mcontext);
    set_fpcontext(td, &uc.uc_mcontext);

    /* Restore signal mask. */
    kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

    return (EJUSTRETURN);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{
    int i;

    for (i = 0; i < PCB_LR; i++)
        pcb->pcb_x[i] = tf->tf_x[i];

    pcb->pcb_x[PCB_LR] = tf->tf_lr;
    pcb->pcb_pc = tf->tf_elr;
    pcb->pcb_sp = tf->tf_sp;
}
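The SPSR check in sys_sigreturn() above rejects any context that would not return to EL0 with exceptions unmasked. The same test as a standalone predicate, a sketch for illustration only:

/*
 * Sketch: a restored context is acceptable only if it returns to EL0t
 * and leaves the D, A, I and F exception masks clear.
 */
static int
spsr_valid_for_el0(uint32_t spsr)
{
    if ((spsr & PSR_M_MASK) != PSR_M_EL0t)
        return (0);
    if ((spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
        return (0);
    return (1);
}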
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
    struct thread *td;
    struct proc *p;
    struct trapframe *tf;
    struct sigframe *fp, frame;
    struct sigacts *psp;
    int code, onstack, sig;

    td = curthread;
    p = td->td_proc;
    PROC_LOCK_ASSERT(p, MA_OWNED);

    sig = ksi->ksi_signo;
    code = ksi->ksi_code;
    psp = p->p_sigacts;
    mtx_assert(&psp->ps_mtx, MA_OWNED);

    tf = td->td_frame;
    onstack = sigonstack(tf->tf_sp);

    CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
        catcher, sig);

    /* Allocate and validate space for the signal handler context. */
    if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
        SIGISMEMBER(psp->ps_sigonstack, sig)) {
        fp = (struct sigframe *)(td->td_sigstk.ss_sp +
            td->td_sigstk.ss_size);
#if defined(COMPAT_43)
        td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
    } else {
        fp = (struct sigframe *)td->td_frame->tf_sp;
    }

    /* Make room, keeping the stack aligned */
    fp--;
    fp = (struct sigframe *)STACKALIGN(fp);

    /* Fill in the frame to copy out */
    get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
    get_fpcontext(td, &frame.sf_uc.uc_mcontext);
    frame.sf_si = ksi->ksi_info;
    frame.sf_uc.uc_sigmask = *mask;
    frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
        ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
    frame.sf_uc.uc_stack = td->td_sigstk;
    mtx_unlock(&psp->ps_mtx);
    PROC_UNLOCK(td->td_proc);

    /* Copy the sigframe out to the user's stack. */
    if (copyout(&frame, fp, sizeof(*fp)) != 0) {
        /* Process has trashed its stack. Kill it. */
        CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
        PROC_LOCK(p);
        sigexit(td, SIGILL);
    }

    tf->tf_x[0] = sig;
    tf->tf_x[1] = (register_t)&fp->sf_si;
    tf->tf_x[2] = (register_t)&fp->sf_uc;

    tf->tf_elr = (register_t)catcher;
    tf->tf_sp = (register_t)fp;
    tf->tf_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));

    CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
        tf->tf_sp);

    PROC_LOCK(p);
    mtx_lock(&psp->ps_mtx);
}

static void
init_proc0(vm_offset_t kstack)
{
    struct pcpu *pcpup = &__pcpu[0];

    proc_linkup0(&proc0, &thread0);
    thread0.td_kstack = kstack;
    thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
    thread0.td_pcb->pcb_fpflags = 0;
    thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
    thread0.td_frame = &proc0_tf;
    pcpup->pc_curpcb = thread0.td_pcb;
}

typedef struct {
    uint32_t type;
    uint64_t phys_start;
    uint64_t virt_start;
    uint64_t num_pages;
    uint64_t attr;
} EFI_MEMORY_DESCRIPTOR;
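add_physmap_entry() below maintains physmap[] as consecutive (start, end) pairs, with physmap_idx counting the slots in use; initarm() later walks the same layout to total memory. A sketch of walking that layout, assuming only what the code below establishes (the print helper is hypothetical):

/*
 * Sketch: each physical range occupies two slots, physmap[i] (start)
 * and physmap[i + 1] (end, exclusive).
 */
static void
physmap_print(const vm_paddr_t *map, u_int idx)
{
    u_int i;

    for (i = 0; i < idx; i += 2)
        printf("  %#lx - %#lx\n", (u_long)map[i], (u_long)map[i + 1]);
}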
static int
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
    u_int i, insert_idx, _physmap_idx;

    _physmap_idx = *physmap_idxp;

    if (length == 0)
        return (1);

    /*
     * Find insertion point while checking for overlap.  Start off by
     * assuming the new entry will be added to the end.
     */
    insert_idx = _physmap_idx;
    for (i = 0; i <= _physmap_idx; i += 2) {
        if (base < physmap[i + 1]) {
            if (base + length <= physmap[i]) {
                insert_idx = i;
                break;
            }
            if (boothowto & RB_VERBOSE)
                printf(
            "Overlapping memory regions, ignoring second region\n");
            return (1);
        }
    }

    /* See if we can prepend to the next entry. */
    if (insert_idx <= _physmap_idx &&
        base + length == physmap[insert_idx]) {
        physmap[insert_idx] = base;
        return (1);
    }

    /* See if we can append to the previous entry. */
    if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
        physmap[insert_idx - 1] += length;
        return (1);
    }

    _physmap_idx += 2;
    *physmap_idxp = _physmap_idx;
    if (_physmap_idx == PHYSMAP_SIZE) {
        printf(
        "Too many segments in the physical address map, giving up\n");
        return (0);
    }

    /*
     * Move the last 'N' entries down to make room for the new
     * entry if needed.
     */
    for (i = _physmap_idx; i > insert_idx; i -= 2) {
        physmap[i] = physmap[i - 2];
        physmap[i + 1] = physmap[i - 1];
    }

    /* Insert the new entry. */
    physmap[insert_idx] = base;
    physmap[insert_idx + 1] = base + length;
    return (1);
}
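A usage sketch of the coalescing behaviour above, with hypothetical addresses: a range that starts exactly where an existing one ends takes the "append to the previous entry" path and consumes no new slots:

/*
 * Hypothetical boot-time calls: after the second call the map still
 * holds a single pair, [0x40000000, 0x40002000), because the new
 * range was appended to the previous entry.
 */
static void
physmap_merge_example(void)
{
    add_physmap_entry(0x40000000, 0x1000, physmap, &physmap_idx);
    add_physmap_entry(0x40001000, 0x1000, physmap, &physmap_idx);
}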
#define efi_next_descriptor(ptr, size) \
    ((struct efi_md *)(((uint8_t *) ptr) + size))

static void
add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
    u_int *physmap_idxp)
{
    struct efi_md *map, *p;
    const char *type;
    size_t efisz;
    int ndesc, i;

    static const char *types[] = {
        "Reserved",
        "LoaderCode",
        "LoaderData",
        "BootServicesCode",
        "BootServicesData",
        "RuntimeServicesCode",
        "RuntimeServicesData",
        "ConventionalMemory",
        "UnusableMemory",
        "ACPIReclaimMemory",
        "ACPIMemoryNVS",
        "MemoryMappedIO",
        "MemoryMappedIOPortSpace",
        "PalCode"
    };

    /*
     * Memory map data provided by UEFI via the GetMemoryMap
     * Boot Services API.
     */
    efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
    map = (struct efi_md *)((uint8_t *)efihdr + efisz);

    if (efihdr->descriptor_size == 0)
        return;
    ndesc = efihdr->memory_size / efihdr->descriptor_size;

    if (boothowto & RB_VERBOSE)
        printf("%23s %12s %12s %8s %4s\n",
            "Type", "Physical", "Virtual", "#Pages", "Attr");

    for (i = 0, p = map; i < ndesc; i++,
        p = efi_next_descriptor(p, efihdr->descriptor_size)) {
        if (boothowto & RB_VERBOSE) {
            if (p->md_type <= EFI_MD_TYPE_PALCODE)
                type = types[p->md_type];
            else
                type = "";
            printf("%23s %012lx %12p %08lx ", type, p->md_phys,
                p->md_virt, p->md_pages);
            if (p->md_attr & EFI_MD_ATTR_UC)
                printf("UC ");
            if (p->md_attr & EFI_MD_ATTR_WC)
                printf("WC ");
            if (p->md_attr & EFI_MD_ATTR_WT)
                printf("WT ");
            if (p->md_attr & EFI_MD_ATTR_WB)
                printf("WB ");
            if (p->md_attr & EFI_MD_ATTR_UCE)
                printf("UCE ");
            if (p->md_attr & EFI_MD_ATTR_WP)
                printf("WP ");
            if (p->md_attr & EFI_MD_ATTR_RP)
                printf("RP ");
            if (p->md_attr & EFI_MD_ATTR_XP)
                printf("XP ");
            if (p->md_attr & EFI_MD_ATTR_RT)
                printf("RUNTIME");
            printf("\n");
        }

        switch (p->md_type) {
        case EFI_MD_TYPE_CODE:
        case EFI_MD_TYPE_DATA:
        case EFI_MD_TYPE_BS_CODE:
        case EFI_MD_TYPE_BS_DATA:
        case EFI_MD_TYPE_FREE:
            /*
             * We're allowed to use any entry with these types.
             */
            break;
        default:
            continue;
        }

        if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
            physmap, physmap_idxp))
            break;
    }
}

#ifdef FDT
static void
try_load_dtb(caddr_t kmdp)
{
    vm_offset_t dtbp;

    dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
    if (dtbp == (vm_offset_t)NULL) {
        printf("ERROR loading DTB\n");
        return;
    }

    if (OF_install(OFW_FDT, 0) == FALSE)
        panic("Cannot install FDT");

    if (OF_init((void *)dtbp) != 0)
        panic("OF_init failed with the found device tree");
}
#endif

static void
cache_setup(void)
{
    int dcache_line_shift, icache_line_shift;
    uint32_t ctr_el0;

    ctr_el0 = READ_SPECIALREG(ctr_el0);

    /* Read the log2 words in each D cache line */
    dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
    /* Get the D cache line size */
    dcache_line_size = sizeof(int) << dcache_line_shift;

    /* And the same for the I cache */
    icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
    icache_line_size = sizeof(int) << icache_line_shift;

    idcache_line_size = MIN(dcache_line_size, icache_line_size);
}

void
initarm(struct arm64_bootparams *abp)
{
    struct efi_map_header *efihdr;
    struct pcpu *pcpup;
    vm_offset_t lastaddr;
    caddr_t kmdp;
    vm_paddr_t mem_len;
    int i;

    /* Set the module data location */
    preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);

    /* Find the kernel address */
    kmdp = preload_search_by_type("elf kernel");
    if (kmdp == NULL)
        kmdp = preload_search_by_type("elf64 kernel");

    boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
    kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);

#ifdef FDT
    try_load_dtb(kmdp);
#endif

    /* Find the address to start allocating from */
    lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);

    /* Load the physical memory ranges */
    physmap_idx = 0;
    efihdr = (struct efi_map_header *)preload_search_info(kmdp,
        MODINFO_METADATA | MODINFOMD_EFI_MAP);
    add_efi_map_entries(efihdr, physmap, &physmap_idx);

    /* Print the memory map */
    mem_len = 0;
    for (i = 0; i < physmap_idx; i += 2)
        mem_len += physmap[i + 1] - physmap[i];

    /* Set the pcpu data, this is needed by pmap_bootstrap */
    pcpup = &__pcpu[0];
    pcpu_init(pcpup, 0, sizeof(struct pcpu));

    /*
     * Set the pcpu pointer with a backup in tpidr_el1 to be
     * loaded when entering the kernel from userland.
     */
    __asm __volatile(
        "mov x18, %0 \n"
        "msr tpidr_el1, %0" :: "r"(pcpup));

    PCPU_SET(curthread, &thread0);

    /* Do basic tuning, hz etc */
    init_param1();

    cache_setup();

    /* Bootstrap enough of pmap to enter the kernel proper */
    pmap_bootstrap(abp->kern_l1pt, KERNBASE - abp->kern_delta,
        lastaddr - KERNBASE);

    arm_devmap_bootstrap(0, NULL);

    cninit();

    init_proc0(abp->kern_stack);
    msgbufinit(msgbufp, msgbufsize);

    mutex_init();
    init_param2(physmem);

    dbg_monitor_init();
    kdb_init();

    early_boot = 0;
}
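A worked example of cache_setup()'s arithmetic above: CTR_EL0 encodes line sizes as log2 of the number of 4-byte words, so a field value of 4 means 16 words, i.e. sizeof(int) << 4 = 64 bytes. A sketch with a hypothetical helper name:

/*
 * Sketch: convert a CTR_EL0 line-size field (log2 words) to bytes,
 * as cache_setup() does for both the D and I caches.
 */
static int64_t
ctr_line_size_bytes(int line_shift)
{
    return ((int64_t)sizeof(int) << line_shift);
}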
Index: head/sys/arm64/arm64/swtch.S
===================================================================
--- head/sys/arm64/arm64/swtch.S	(revision 286224)
+++ head/sys/arm64/arm64/swtch.S	(revision 286225)
@@ -1,278 +1,282 @@
/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "assym.s"
#include "opt_kstack_pages.h"
#include "opt_sched.h"

#include
__FBSDID("$FreeBSD$");

/*
 * void cpu_throw(struct thread *old, struct thread *new)
 */
ENTRY(cpu_throw)
#ifdef VFP
    /* Backup the new thread pointer around a call to C code */
    mov x19, x1
    bl  vfp_discard
    mov x1, x19
#endif

    /* Store the new curthread */
    str x1, [x18, #PC_CURTHREAD]
    /* And the new pcb */
    ldr x4, [x1, #TD_PCB]
    str x4, [x18, #PC_CURPCB]

    /*
     * TODO: We may need to flush the cache here.
     */

    /* Switch to the new pmap */
    ldr x5, [x4, #PCB_L1ADDR]
    msr ttbr0_el1, x5
    isb

    /* Invalidate the TLB */
    dsb sy
    tlbi    vmalle1is
    dsb sy
    isb

    /* Restore the registers */
    ldp x5, x6, [x4, #PCB_SP]
    mov sp, x5
    msr tpidr_el0, x6
    ldp x8, x9, [x4, #PCB_REGS + 8 * 8]
    ldp x10, x11, [x4, #PCB_REGS + 10 * 8]
    ldp x12, x13, [x4, #PCB_REGS + 12 * 8]
    ldp x14, x15, [x4, #PCB_REGS + 14 * 8]
    ldp x16, x17, [x4, #PCB_REGS + 16 * 8]
    ldr x19, [x4, #PCB_REGS + 19 * 8]
    ldp x20, x21, [x4, #PCB_REGS + 20 * 8]
    ldp x22, x23, [x4, #PCB_REGS + 22 * 8]
    ldp x24, x25, [x4, #PCB_REGS + 24 * 8]
    ldp x26, x27, [x4, #PCB_REGS + 26 * 8]
    ldp x28, x29, [x4, #PCB_REGS + 28 * 8]
    ldr x30, [x4, #PCB_REGS + 30 * 8]

    ret
END(cpu_throw)

/*
 * void cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
 *
 * x0 = old
 * x1 = new
 * x2 = mtx
 * x3 to x7, x16 and x17 are caller saved
 */
ENTRY(cpu_switch)
    /* Store the new curthread */
    str x1, [x18, #PC_CURTHREAD]
    /* And the new pcb */
    ldr x4, [x1, #TD_PCB]
    str x4, [x18, #PC_CURPCB]

    /*
     * Save the old context.
     */
    ldr x4, [x0, #TD_PCB]

    /* Store the callee-saved registers */
    stp x8, x9, [x4, #PCB_REGS + 8 * 8]
    stp x10, x11, [x4, #PCB_REGS + 10 * 8]
    stp x12, x13, [x4, #PCB_REGS + 12 * 8]
    stp x14, x15, [x4, #PCB_REGS + 14 * 8]
    stp x16, x17, [x4, #PCB_REGS + 16 * 8]
    stp x18, x19, [x4, #PCB_REGS + 18 * 8]
    stp x20, x21, [x4, #PCB_REGS + 20 * 8]
    stp x22, x23, [x4, #PCB_REGS + 22 * 8]
    stp x24, x25, [x4, #PCB_REGS + 24 * 8]
    stp x26, x27, [x4, #PCB_REGS + 26 * 8]
    stp x28, x29, [x4, #PCB_REGS + 28 * 8]
    str x30, [x4, #PCB_REGS + 30 * 8]
    /* And the old stack pointer */
    mov x5, sp
    mrs x6, tpidr_el0
    stp x5, x6, [x4, #PCB_SP]

#ifdef VFP
    mov x19, x0
    mov x20, x1
    mov x21, x2
+   /* Load the pcb address */
+   mov x1, x4
    bl  vfp_save_state
    mov x2, x21
    mov x1, x20
    mov x0, x19
#endif

    /*
     * Restore the saved context.
     */
    ldr x4, [x1, #TD_PCB]

    /*
     * TODO: We may need to flush the cache here if switching
     * to a user process.
     */

    /* Switch to the new pmap */
    ldr x5, [x4, #PCB_L1ADDR]
    msr ttbr0_el1, x5
    isb

    /* Invalidate the TLB */
    dsb sy
    tlbi    vmalle1is
    dsb sy
    isb

    /* Release the old thread */
    str x2, [x0, #TD_LOCK]
#if defined(SCHED_ULE) && defined(SMP)
    /* Read the value in blocked_lock */
    ldr x0, =_C_LABEL(blocked_lock)
    ldr x1, [x0]
    /* Load curthread */
    ldr x2, [x18, #PC_CURTHREAD]
1:
    ldr x3, [x2, #TD_LOCK]
    cmp x3, x1
    b.eq    1b
#endif

    /* Restore the registers */
    ldp x5, x6, [x4, #PCB_SP]
    mov sp, x5
    msr tpidr_el0, x6
    ldp x8, x9, [x4, #PCB_REGS + 8 * 8]
    ldp x10, x11, [x4, #PCB_REGS + 10 * 8]
    ldp x12, x13, [x4, #PCB_REGS + 12 * 8]
    ldp x14, x15, [x4, #PCB_REGS + 14 * 8]
    ldp x16, x17, [x4, #PCB_REGS + 16 * 8]
    ldr x19, [x4, #PCB_REGS + 19 * 8]
    ldp x20, x21, [x4, #PCB_REGS + 20 * 8]
    ldp x22, x23, [x4, #PCB_REGS + 22 * 8]
    ldp x24, x25, [x4, #PCB_REGS + 24 * 8]
    ldp x26, x27, [x4, #PCB_REGS + 26 * 8]
    ldp x28, x29, [x4, #PCB_REGS + 28 * 8]
    ldr x30, [x4, #PCB_REGS + 30 * 8]
    str xzr, [x4, #PCB_REGS + 18 * 8]
    ret
.Lcpu_switch_panic_str:
    .asciz "cpu_switch: %p\0"
END(cpu_switch)

ENTRY(fork_trampoline)
    mov x0, x8
    mov x1, x9
    mov x2, sp
    mov fp, #0  /* Stack traceback stops here. */
    bl  _C_LABEL(fork_exit)

    /* Restore sp and lr */
    ldp x0, x1, [sp]
    msr sp_el0, x0
    mov lr, x1

    /* Restore the registers other than x0 and x1 */
    ldp x2, x3, [sp, #TF_X + 2 * 8]
    ldp x4, x5, [sp, #TF_X + 4 * 8]
    ldp x6, x7, [sp, #TF_X + 6 * 8]
    ldp x8, x9, [sp, #TF_X + 8 * 8]
    ldp x10, x11, [sp, #TF_X + 10 * 8]
    ldp x12, x13, [sp, #TF_X + 12 * 8]
    ldp x14, x15, [sp, #TF_X + 14 * 8]
    ldp x16, x17, [sp, #TF_X + 16 * 8]
    ldr x19, [sp, #TF_X + 19 * 8]
    ldp x20, x21, [sp, #TF_X + 20 * 8]
    ldp x22, x23, [sp, #TF_X + 22 * 8]
    ldp x24, x25, [sp, #TF_X + 24 * 8]
    ldp x26, x27, [sp, #TF_X + 26 * 8]
    ldp x28, x29, [sp, #TF_X + 28 * 8]
    /* Skip x30 as it was restored above as lr */

    /*
     * Disable interrupts to avoid
     * overwriting spsr_el1 by an IRQ exception.
     */
    msr daifset, #2

    /* Restore elr and spsr */
    ldp x0, x1, [sp, #16]
    msr elr_el1, x0
    msr spsr_el1, x1

    /* Finally x0 and x1 */
    ldp x0, x1, [sp, #TF_X + 0 * 8]
    ldr x18, [sp, #TF_X + 18 * 8]

    /*
     * No need for interrupts reenabling since PSR
     * will be set to the desired value anyway.
     */
    eret
END(fork_trampoline)

ENTRY(savectx)
    /* Store the callee-saved registers */
    stp x8, x9, [x0, #PCB_REGS + 8 * 8]
    stp x10, x11, [x0, #PCB_REGS + 10 * 8]
    stp x12, x13, [x0, #PCB_REGS + 12 * 8]
    stp x14, x15, [x0, #PCB_REGS + 14 * 8]
    stp x16, x17, [x0, #PCB_REGS + 16 * 8]
    stp x18, x19, [x0, #PCB_REGS + 18 * 8]
    stp x20, x21, [x0, #PCB_REGS + 20 * 8]
    stp x22, x23, [x0, #PCB_REGS + 22 * 8]
    stp x24, x25, [x0, #PCB_REGS + 24 * 8]
    stp x26, x27, [x0, #PCB_REGS + 26 * 8]
    stp x28, x29, [x0, #PCB_REGS + 28 * 8]
    str x30, [x0, #PCB_REGS + 30 * 8]
    /* And the old stack pointer */
    mov x5, sp
    mrs x6, tpidr_el0
    stp x5, x6, [x0, #PCB_SP]

    /* Store the VFP registers */
#ifdef VFP
-   mov x29, lr
+   mov x28, lr
+   mov x1, x0      /* move pcb to the correct register */
+   mov x0, xzr     /* td = NULL */
    bl  vfp_save_state
-   mov lr, x29
+   mov lr, x28
#endif

    ret
END(savectx)
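savectx() above is the one caller with no thread context: it passes the pcb it was handed in x1 and xzr (NULL) as td. The same convention from the C side, as a sketch assuming only the new vfp_save_state() contract shown in the next file (the helper name is hypothetical):

/*
 * Sketch: a pcb-only caller passes td == NULL; vfp_save_state()
 * substitutes curthread before checking fpcurthread.
 */
static void
save_fp_for_pcb(struct pcb *pcb)
{
    vfp_save_state(NULL, pcb);
}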
Index: head/sys/arm64/arm64/vfp.c
===================================================================
--- head/sys/arm64/arm64/vfp.c	(revision 286224)
+++ head/sys/arm64/arm64/vfp.c	(revision 286225)
@@ -1,203 +1,209 @@
/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

#ifdef VFP
#include
#include
#include
#include
#include
#include
#include
#include

/* Sanity check we can store all the VFP registers */
CTASSERT(sizeof(((struct pcb *)0)->pcb_vfp) == 16 * 32);

static void
vfp_enable(void)
{
    uint32_t cpacr;

    cpacr = READ_SPECIALREG(cpacr_el1);
    cpacr = (cpacr & ~CPACR_FPEN_MASK) | CPACR_FPEN_TRAP_NONE;
    WRITE_SPECIALREG(cpacr_el1, cpacr);
    isb();
}

static void
vfp_disable(void)
{
    uint32_t cpacr;

    cpacr = READ_SPECIALREG(cpacr_el1);
    cpacr = (cpacr & ~CPACR_FPEN_MASK) | CPACR_FPEN_TRAP_ALL1;
    WRITE_SPECIALREG(cpacr_el1, cpacr);
    isb();
}

/*
 * Called when the thread is dying. If the thread was the last to use the
 * VFP unit mark it as unused to tell the kernel the fp state is unowned.
 * Ensure the VFP unit is off so we get an exception on the next access.
 */
void
vfp_discard(struct thread *td)
{

    if (PCPU_GET(fpcurthread) == td)
        PCPU_SET(fpcurthread, NULL);

    vfp_disable();
}
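The reworked vfp_save_state() below accepts two shapes of call, enforced by its new KASSERTs. Summarized as a sketch (the wrapper is hypothetical, illustration only):

/*
 * Accepted pairings, per the assertions below:
 *   vfp_save_state(td, td->td_pcb)   - td and pcb must agree
 *   vfp_save_state(NULL, pcb)        - td == NULL means curthread
 * Any other (td, pcb) pairing trips the "Invalid vfp pcb" assertion.
 */
static void
vfp_save_checked(struct thread *td, struct pcb *pcb)
{
    vfp_save_state(td, pcb);
}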
void
-vfp_save_state(struct thread *td)
+vfp_save_state(struct thread *td, struct pcb *pcb)
{
    __int128_t *vfp_state;
    uint64_t fpcr, fpsr;
    uint32_t cpacr;

+   KASSERT(pcb != NULL, ("NULL vfp pcb"));
+   KASSERT(td == NULL || td->td_pcb == pcb, ("Invalid vfp pcb"));
+
+   if (td == NULL)
+       td = curthread;
+
    critical_enter();
    /*
     * Only store the registers if the VFP is enabled,
     * i.e. return if we are trapping on FP access.
     */
    cpacr = READ_SPECIALREG(cpacr_el1);
    if ((cpacr & CPACR_FPEN_MASK) == CPACR_FPEN_TRAP_NONE) {
        KASSERT(PCPU_GET(fpcurthread) == td,
            ("Storing an invalid VFP state"));

-       vfp_state = td->td_pcb->pcb_vfp;
+       vfp_state = pcb->pcb_vfp;
        __asm __volatile(
            "mrs %0, fpcr           \n"
            "mrs %1, fpsr           \n"
            "stp q0,  q1,  [%2, #16 *  0]\n"
            "stp q2,  q3,  [%2, #16 *  2]\n"
            "stp q4,  q5,  [%2, #16 *  4]\n"
            "stp q6,  q7,  [%2, #16 *  6]\n"
            "stp q8,  q9,  [%2, #16 *  8]\n"
            "stp q10, q11, [%2, #16 * 10]\n"
            "stp q12, q13, [%2, #16 * 12]\n"
            "stp q14, q15, [%2, #16 * 14]\n"
            "stp q16, q17, [%2, #16 * 16]\n"
            "stp q18, q19, [%2, #16 * 18]\n"
            "stp q20, q21, [%2, #16 * 20]\n"
            "stp q22, q23, [%2, #16 * 22]\n"
            "stp q24, q25, [%2, #16 * 24]\n"
            "stp q26, q27, [%2, #16 * 26]\n"
            "stp q28, q29, [%2, #16 * 28]\n"
            "stp q30, q31, [%2, #16 * 30]\n"
            : "=&r"(fpcr), "=&r"(fpsr) : "r"(vfp_state));

-       td->td_pcb->pcb_fpcr = fpcr;
-       td->td_pcb->pcb_fpsr = fpsr;
+       pcb->pcb_fpcr = fpcr;
+       pcb->pcb_fpsr = fpsr;

        dsb(ish);
        vfp_disable();
    }
    critical_exit();
}

void
vfp_restore_state(void)
{
    __int128_t *vfp_state;
    uint64_t fpcr, fpsr;
    struct pcb *curpcb;
    u_int cpu;

    critical_enter();

    cpu = PCPU_GET(cpuid);
    curpcb = curthread->td_pcb;
    curpcb->pcb_fpflags |= PCB_FP_STARTED;

    vfp_enable();

    /*
     * If the previous thread on this cpu to use the VFP was not the
     * current thread, or the current thread last used it on a different
     * cpu we need to restore the old state.
     */
    if (PCPU_GET(fpcurthread) != curthread || cpu != curpcb->pcb_vfpcpu) {

        vfp_state = curthread->td_pcb->pcb_vfp;
        fpcr = curthread->td_pcb->pcb_fpcr;
        fpsr = curthread->td_pcb->pcb_fpsr;

        __asm __volatile(
            "ldp q0,  q1,  [%2, #16 *  0]\n"
            "ldp q2,  q3,  [%2, #16 *  2]\n"
            "ldp q4,  q5,  [%2, #16 *  4]\n"
            "ldp q6,  q7,  [%2, #16 *  6]\n"
            "ldp q8,  q9,  [%2, #16 *  8]\n"
            "ldp q10, q11, [%2, #16 * 10]\n"
            "ldp q12, q13, [%2, #16 * 12]\n"
            "ldp q14, q15, [%2, #16 * 14]\n"
            "ldp q16, q17, [%2, #16 * 16]\n"
            "ldp q18, q19, [%2, #16 * 18]\n"
            "ldp q20, q21, [%2, #16 * 20]\n"
            "ldp q22, q23, [%2, #16 * 22]\n"
            "ldp q24, q25, [%2, #16 * 24]\n"
            "ldp q26, q27, [%2, #16 * 26]\n"
            "ldp q28, q29, [%2, #16 * 28]\n"
            "ldp q30, q31, [%2, #16 * 30]\n"
            "msr fpcr, %0           \n"
            "msr fpsr, %1           \n"
            : : "r"(fpcr), "r"(fpsr), "r"(vfp_state));

        PCPU_SET(fpcurthread, curthread);
        curpcb->pcb_vfpcpu = cpu;
    }

    critical_exit();
}

void
vfp_init(void)
{
    uint64_t pfr;

    /* Check if there is a vfp unit present */
    pfr = READ_SPECIALREG(id_aa64pfr0_el1);
    if ((pfr & ID_AA64PFR0_FP_MASK) == ID_AA64PFR0_FP_NONE)
        return;

    /* Disable to be enabled when it's used */
    vfp_disable();
}

SYSINIT(vfp, SI_SUB_CPU, SI_ORDER_ANY, vfp_init, NULL);

#endif
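vfp_restore_state() above reloads the registers only when FP ownership changed hands. The test, condensed into a sketch for illustration:

/*
 * Sketch: a reload is needed when this CPU's last FP user was a
 * different thread, or this thread last used FP on another CPU.
 */
static int
vfp_restore_needed(u_int cpu)
{
    return (PCPU_GET(fpcurthread) != curthread ||
        cpu != curthread->td_pcb->pcb_vfpcpu);
}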
Index: head/sys/arm64/arm64/vm_machdep.c
===================================================================
--- head/sys/arm64/arm64/vm_machdep.c	(revision 286224)
+++ head/sys/arm64/arm64/vm_machdep.c	(revision 286225)
@@ -1,260 +1,260 @@
/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef VFP
#include
#endif

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
    struct pcb *pcb2;
    struct trapframe *tf;

    if ((flags & RFPROC) == 0)
        return;

    if (td1 == curthread) {
        /*
         * Save the tpidr_el0 and the vfp state. These normally happen
         * in cpu_switch, but if userland changes these then forks,
         * that may not have happened.
         */
        td1->td_pcb->pcb_tpidr_el0 = READ_SPECIALREG(tpidr_el0);
#ifdef VFP
        if ((td1->td_pcb->pcb_fpflags & PCB_FP_STARTED) != 0)
-           vfp_save_state(td1);
+           vfp_save_state(td1, td1->td_pcb);
#endif
    }

    pcb2 = (struct pcb *)(td2->td_kstack +
        td2->td_kstack_pages * PAGE_SIZE) - 1;

    td2->td_pcb = pcb2;
    bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

    td2->td_pcb->pcb_l1addr =
        vtophys(vmspace_pmap(td2->td_proc->p_vmspace)->pm_l1);

    tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1);
    bcopy(td1->td_frame, tf, sizeof(*tf));
    tf->tf_x[0] = 0;
    tf->tf_x[1] = 0;
    tf->tf_spsr = 0;

    td2->td_frame = tf;

    /* Set the return value registers for fork() */
    td2->td_pcb->pcb_x[8] = (uintptr_t)fork_return;
    td2->td_pcb->pcb_x[9] = (uintptr_t)td2;
    td2->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline;
    td2->td_pcb->pcb_sp = (uintptr_t)td2->td_frame;
    td2->td_pcb->pcb_vfpcpu = UINT_MAX;

    /* Setup to release spin count in fork_exit(). */
    td2->td_md.md_spinlock_count = 1;
    td2->td_md.md_saved_daif = 0;
}

void
cpu_reset(void)
{

    printf("cpu_reset");
    while (1)
        __asm volatile("wfi" ::: "memory");
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{
    struct trapframe *frame;

    frame = td->td_frame;

    switch (error) {
    case 0:
        frame->tf_x[0] = td->td_retval[0];
        frame->tf_x[1] = td->td_retval[1];
        frame->tf_spsr &= ~PSR_C;   /* carry bit */
        break;
    case ERESTART:
        frame->tf_elr -= 4;
        break;
    case EJUSTRETURN:
        break;
    default:
        frame->tf_spsr |= PSR_C;    /* carry bit */
        frame->tf_x[0] = error;
        break;
    }
}
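cpu_set_syscall_retval() above reports errors through the carry flag (PSR_C) rather than a sentinel return value. A pseudo-C sketch of the userland side; the real libc stubs are assembly, and this helper is illustrative only:

/*
 * Illustrative only: after "svc 0", a libc stub branches on the carry
 * flag. Carry set: x0 holds an errno value. Carry clear: x0/x1 hold
 * the results.
 */
static long
syscall_return(uint64_t x0, int carry_set, int *errp)
{
    if (carry_set) {
        *errp = (int)x0;
        return (-1);
    }
    return ((long)x0);
}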
/*
 * Initialize machine state (pcb and trap frame) for a new thread about to
 * upcall. Put enough state in the new thread's PCB to get it to go back to
 * userret(), where we can intercept it again to set the return (upcall)
 * address and stack, along with those from upcalls that are from other
 * sources such as those generated in thread_userret() itself.
 */
void
cpu_set_upcall(struct thread *td, struct thread *td0)
{

    bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
    bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));

    td->td_pcb->pcb_x[8] = (uintptr_t)fork_return;
    td->td_pcb->pcb_x[9] = (uintptr_t)td;
    td->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline;
    td->td_pcb->pcb_sp = (uintptr_t)td->td_frame;
    td->td_pcb->pcb_vfpcpu = UINT_MAX;

    /* Setup to release spin count in fork_exit(). */
    td->td_md.md_spinlock_count = 1;
    td->td_md.md_saved_daif = 0;
}

/*
 * Set the machine state for performing an upcall that has to
 * be done in thread_userret() so that those upcalls generated
 * in thread_userret() itself can be done as well.
 */
void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{
    struct trapframe *tf = td->td_frame;

    tf->tf_sp = STACKALIGN(stack->ss_sp + stack->ss_size);
    tf->tf_elr = (register_t)entry;
    tf->tf_x[0] = (register_t)arg;
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
    struct pcb *pcb;

    if ((uintptr_t)tls_base >= VM_MAXUSER_ADDRESS)
        return (EINVAL);

    pcb = td->td_pcb;
    pcb->pcb_tpidr_el0 = (register_t)tls_base;

    return (0);
}

void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{

    td->td_pcb = (struct pcb *)(td->td_kstack +
        td->td_kstack_pages * PAGE_SIZE) - 1;
    td->td_frame = (struct trapframe *)STACKALIGN(
        td->td_pcb - 1);
}

void
cpu_thread_free(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{

    td->td_pcb->pcb_x[8] = (uintptr_t)func;
    td->td_pcb->pcb_x[9] = (uintptr_t)arg;
    td->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline;
    td->td_pcb->pcb_sp = (uintptr_t)td->td_frame;
    td->td_pcb->pcb_vfpcpu = UINT_MAX;
}

void
cpu_exit(struct thread *td)
{
}

void
swi_vm(void *v)
{

    /* Nothing to do here - busdma bounce buffers are not implemented. */
}

Index: head/sys/arm64/include/vfp.h
===================================================================
--- head/sys/arm64/include/vfp.h	(revision 286224)
+++ head/sys/arm64/include/vfp.h	(revision 286225)
@@ -1,46 +1,46 @@
/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_VFP_H_
#define _MACHINE_VFP_H_

#ifdef _KERNEL
#ifndef LOCORE
void	vfp_init(void);
void	vfp_discard(struct thread *);
void	vfp_restore_state(void);
-void	vfp_save_state(struct thread *);
+void	vfp_save_state(struct thread *, struct pcb *);
#endif
#endif

#endif /* !_MACHINE_VFP_H_ */
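For orientation, the fork-return plumbing these files share, restated as a sketch grounded in cpu_set_fork_handler() and fork_trampoline above: cpu_switch() restores pcb_x[8]/pcb_x[9] into x8/x9 and returns to pcb_x[PCB_LR] (fork_trampoline), which moves x8/x9 into the first two arguments of fork_exit():

/*
 * Sketch of the handoff (mirrors cpu_set_fork_handler() above):
 *   pcb_x[8]      -> x8 -> x0: function for fork_exit() to call
 *   pcb_x[9]      -> x9 -> x1: its argument
 *   pcb_x[PCB_LR] -> lr:       fork_trampoline
 *   pcb_sp        -> sp:       the new thread's trapframe
 */
static void
set_fork_return(struct thread *td, void (*func)(void *), void *arg)
{
    td->td_pcb->pcb_x[8] = (uintptr_t)func;
    td->td_pcb->pcb_x[9] = (uintptr_t)arg;
    td->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline;
    td->td_pcb->pcb_sp = (uintptr_t)td->td_frame;
}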