diff --git a/lib/libc/arm/aeabi/aeabi_vfp.h b/lib/libc/arm/aeabi/aeabi_vfp.h
index f87f9acfd0a2..3b70fe06fab3 100644
--- a/lib/libc/arm/aeabi/aeabi_vfp.h
+++ b/lib/libc/arm/aeabi/aeabi_vfp.h
@@ -1,129 +1,129 @@
/*
 * Copyright (C) 2013 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#ifndef AEABI_VFP_H
#define AEABI_VFP_H

#include

/*
 * ASM helper macros. These allow the functions to be changed depending on
 * the endian-ness we are building for.
 */

/* Allow the name of the function to be changed depending on the ABI */
#ifndef __ARM_PCS_VFP
#define AEABI_ENTRY(x)	ENTRY(__aeabi_ ## x ## _vfp)
#define AEABI_END(x)	END(__aeabi_ ## x ## _vfp)
#else
#define AEABI_ENTRY(x)	ENTRY(__aeabi_ ## x)
#define AEABI_END(x)	END(__aeabi_ ## x)
#endif

/*
 * These should be used when a function either takes, or returns a floating
 * point value. They will load the data from an ARM to a VFP register(s),
 * or from a VFP to an ARM register
 */
#ifdef __ARM_BIG_ENDIAN
#define LOAD_DREG(vreg, reg0, reg1)	vmov vreg, reg1, reg0
#define UNLOAD_DREG(reg0, reg1, vreg)	vmov reg1, reg0, vreg
#else
#define LOAD_DREG(vreg, reg0, reg1)	vmov vreg, reg0, reg1
#define UNLOAD_DREG(reg0, reg1, vreg)	vmov reg0, reg1, vreg
#endif

#define LOAD_SREGS(vreg0, vreg1, reg0, reg1)	vmov vreg0, vreg1, reg0, reg1
#define LOAD_SREG(vreg, reg)			vmov vreg, reg
#define UNLOAD_SREG(reg, vreg)			vmov reg, vreg

/*
 * C Helper macros
 */

-#if __ARM_ARCH >= 6 && !defined(SOFTFLOAT_FOR_GCC)
+#if !defined(SOFTFLOAT_FOR_GCC)
/*
 * Generate a function that will either call into the VFP implementation,
 * or the soft float version for a given __aeabi_* helper. The function
 * will take a single argument of the type given by in_type.
 */
#define AEABI_FUNC(name, in_type, soft_func)		\
__aeabi_ ## name(in_type a)				\
{							\
	if (_libc_arm_fpu_present)			\
		return __aeabi_ ## name ## _vfp(a);	\
	else						\
		return soft_func (a);			\
}

/* As above, but takes two arguments of the same type */
#define AEABI_FUNC2(name, in_type, soft_func)		\
__aeabi_ ## name(in_type a, in_type b)			\
{							\
	if (_libc_arm_fpu_present)			\
		return __aeabi_ ## name ## _vfp(a, b);	\
	else						\
		return soft_func (a, b);		\
}

/* As above, but with the soft float arguments reversed */
#define AEABI_FUNC2_REV(name, in_type, soft_func)	\
__aeabi_ ## name(in_type a, in_type b)			\
{							\
	if (_libc_arm_fpu_present)			\
		return __aeabi_ ## name ## _vfp(a, b);	\
	else						\
		return soft_func (b, a);		\
}
#else
/*
 * Helper macros for when we are only able to use the softfloat
 * version of these functions, i.e. on arm before armv6.
 */
#define AEABI_FUNC(name, in_type, soft_func)		\
__aeabi_ ## name(in_type a)				\
{							\
	return soft_func (a);				\
}

/* As above, but takes two arguments of the same type */
#define AEABI_FUNC2(name, in_type, soft_func)		\
__aeabi_ ## name(in_type a, in_type b)			\
{							\
	return soft_func (a, b);			\
}

/* As above, but with the soft float arguments reversed */
#define AEABI_FUNC2_REV(name, in_type, soft_func)	\
__aeabi_ ## name(in_type a, in_type b)			\
{							\
	return soft_func (b, a);			\
}
#endif

#endif
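
Note (illustrative, not part of the patch): the AEABI_FUNC* macros above supply only a
function body; each wrapper source file puts the return type in front of the macro
invocation. Assuming a wrapper written as "double AEABI_FUNC2(dadd, double, __adddf3)"
(the helper name and softfloat routine are chosen here only as an example pairing), the
runtime dispatch expands to roughly the following C:

	double
	__aeabi_dadd(double a, double b)
	{
		if (_libc_arm_fpu_present)		/* set by libc when a VFP unit is detected */
			return __aeabi_dadd_vfp(a, b);	/* assembly version built with AEABI_ENTRY/AEABI_END */
		else
			return __adddf3(a, b);		/* softfloat fallback */
	}
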
diff --git a/lib/libc/arm/gen/_setjmp.S b/lib/libc/arm/gen/_setjmp.S
index 19b8b6b07059..9e655d2e9e2e 100644
--- a/lib/libc/arm/gen/_setjmp.S
+++ b/lib/libc/arm/gen/_setjmp.S
@@ -1,135 +1,135 @@
/*	$NetBSD: _setjmp.S,v 1.12 2013/04/19 13:45:45 matt Exp $	*/

/*
 * Copyright (c) 1997 Mark Brinicombe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if !defined(__SOFTFP__) && !defined(__VFP_FP__) && !defined(__ARM_PCS)
#error FPA is not supported anymore
#endif

#if !defined(_STANDALONE)
	.fpu	vfp
#endif

#include <machine/asm.h>
#include <machine/setjmp.h>

/*
 * C library -- _setjmp, _longjmp
 *
 *	_longjmp(a,v)
 * will generate a "return(v)" from the last call to
 *	_setjmp(a)
 * by restoring registers from the stack.
 * The previous signal state is NOT restored.
 *
 * Note: r0 is the return value
 *       r1-r3,ip are scratch registers in functions
 */

ENTRY(_setjmp)
	ldr	r1, .L_setjmp_magic

-#if !defined(_STANDALONE) && __ARM_ARCH >= 6 && !defined(SOFTFLOAT_FOR_GCC)
+#if !defined(_STANDALONE) && !defined(SOFTFLOAT_FOR_GCC)
	add	r2, r0, #(_JB_REG_D8 * 4)
	vstmia	r2, {d8-d15}
	vmrs	r2, fpscr
	str	r2, [r0, #(_JB_REG_FPSCR * 4)]
-#endif /* !_STANDALONE && __ARM_ARCH >= 6 */
+#endif /* !_STANDALONE && !SOFTFLOAT_FOR_GCC */

	str	r1, [r0]

	add	r0, r0, #(_JB_REG_R4 * 4)
	/* Store integer registers */
#ifndef __thumb__
	stmia	r0, {r4-r14}
#else
	stmia	r0, {r4-r12}
	str	r13, [r0, #((_JB_REG_R13 - _JB_REG_R4) * 4)]
	str	r14, [r0, #((_JB_REG_R14 - _JB_REG_R4) * 4)]
#endif
	mov	r0, #0x00000000
	RET
END(_setjmp)

.L_setjmp_magic:
	.word	_JB_MAGIC__SETJMP

WEAK_ALIAS(___longjmp, _longjmp)
ENTRY(_longjmp)
	ldr	r2, [r0]		/* get magic from jmp_buf */
	ldr	ip, .L_setjmp_magic	/* load magic */
	teq	ip, r2			/* magic correct? */
	bne	botch			/* no, botch */

-#if !defined(_STANDALONE) && __ARM_ARCH >= 6 && !defined(SOFTFLOAT_FOR_GCC)
+#if !defined(_STANDALONE) && !defined(SOFTFLOAT_FOR_GCC)
	add	ip, r0, #(_JB_REG_D8 * 4)
	vldmia	ip, {d8-d15}
	ldr	ip, [r0, #(_JB_REG_FPSCR * 4)]
	vmsr	fpscr, ip
-#endif /* !_STANDALONE && __ARM_ARCH >= 6 */
+#endif /* !_STANDALONE && !SOFTFLOAT_FOR_GCC */

	add	r0, r0, #(_JB_REG_R4 * 4)
	/* Restore integer registers */
#ifndef __thumb__
	ldmia	r0, {r4-r14}
#else
	ldmia	r0, {r4-r12}
	ldr	r13, [r0, #((_JB_REG_R13 - _JB_REG_R4) * 4)]
	ldr	r14, [r0, #((_JB_REG_R14 - _JB_REG_R4) * 4)]
#endif

	/* Validate sp and r14 */
	teq	sp, #0
	it	ne
	teqne	r14, #0
	it	eq
	beq	botch

	/* Set return value */
	movs	r0, r1
	it	eq
	moveq	r0, #0x00000001
	RET

	/* validation failed, die die die. */
botch:
#if !defined(_STANDALONE)
	bl	PIC_SYM(_C_LABEL(longjmperror), PLT)
	bl	PIC_SYM(_C_LABEL(abort), PLT)
1:	b	1b		/* Cannot get here */
#else
	b	.
#endif
END(_longjmp)

	.section .note.GNU-stack,"",%progbits
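
Note (illustrative, not part of the patch): the "movs r0, r1; it eq; moveq r0, #0x00000001"
sequence at the end of _longjmp implements the usual contract that a longjmp value of 0 is
still reported as a non-zero return from _setjmp. A minimal C caller exercising both
returns:

	#include <setjmp.h>
	#include <stdio.h>

	static jmp_buf env;

	int
	main(void)
	{
		if (_setjmp(env) == 0)		/* first return: magic word stored, value 0 */
			_longjmp(env, 0);	/* v == 0 is promoted to 1 */
		else
			printf("resumed\n");	/* second return from _setjmp, value 1 */
		return (0);
	}
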
diff --git a/lib/libc/arm/gen/setjmp.S b/lib/libc/arm/gen/setjmp.S
index 5a6c899e2b23..e7f8b788e878 100644
--- a/lib/libc/arm/gen/setjmp.S
+++ b/lib/libc/arm/gen/setjmp.S
@@ -1,142 +1,142 @@
/*	$NetBSD: setjmp.S,v 1.14 2013/04/19 13:45:45 matt Exp $	*/

/*
 * Copyright (c) 1997 Mark Brinicombe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if !defined(__SOFTFP__) && !defined(__VFP_FP__) && !defined(__ARM_PCS)
#error FPA is not supported anymore
#endif

	.fpu	vfp

#include <machine/asm.h>
#include <machine/setjmp.h>

/*
 * C library -- setjmp, longjmp
 *
 *	longjmp(a,v)
 * will generate a "return(v)" from the last call to
 *	setjmp(a)
 * by restoring registers from the stack.
 * The previous signal state is restored.
 */

ENTRY(setjmp)
	/* Block all signals and retrieve the old signal mask */
	stmfd	sp!, {r0, r14}
	add	r2, r0, #(_JB_SIGMASK * 4)	/* oset */
	mov	r1, #0x00000000			/* set */
	mov	r0, #0x00000001			/* SIG_BLOCK */
	bl	PIC_SYM(_C_LABEL(sigprocmask), PLT)
	ldmfd	sp!, {r0, r14}

	ldr	r1, .Lsetjmp_magic

-#if __ARM_ARCH >= 6 && !defined(SOFTFLOAT_FOR_GCC)
+#if !defined(SOFTFLOAT_FOR_GCC)
	add	r2, r0, #(_JB_REG_D8 * 4)
	vstmia	r2, {d8-d15}
	vmrs	r2, fpscr
	str	r2, [r0, #(_JB_REG_FPSCR * 4)]
#endif

	str	r1, [r0]		/* store magic */

	/* Store integer registers */
	add	r0, r0, #(_JB_REG_R4 * 4)
#ifndef __thumb__
	stmia	r0, {r4-r14}
#else
	stmia	r0, {r4-r12}
	str	r13, [r0, #((_JB_REG_R13 - _JB_REG_R4) * 4)]
	str	r14, [r0, #((_JB_REG_R14 - _JB_REG_R4) * 4)]
#endif
	mov	r0, #0x00000000
	RET

.Lsetjmp_magic:
	.word	_JB_MAGIC_SETJMP
END(setjmp)

	.weak	_C_LABEL(longjmp)
	.set	_C_LABEL(longjmp), _C_LABEL(__longjmp)

ENTRY(__longjmp)
	ldr	r2, [r0]
	ldr	ip, .Lsetjmp_magic
	teq	r2, ip
	bne	.Lbotch

	/* Restore the signal mask. */
	stmfd	sp!, {r0-r2, r14}
	mov	r2, #0x00000000
	add	r1, r0, #(_JB_SIGMASK * 4)	/* Signal mask */
	mov	r0, #3				/* SIG_SETMASK */
	bl	PIC_SYM(_C_LABEL(sigprocmask), PLT)
	ldmfd	sp!, {r0-r2, r14}

-#if __ARM_ARCH >= 6 && !defined(SOFTFLOAT_FOR_GCC)
+#if !defined(SOFTFLOAT_FOR_GCC)
	add	ip, r0, #(_JB_REG_D8 * 4)
	vldmia	ip, {d8-d15}
	ldr	ip, [r0, #(_JB_REG_FPSCR * 4)]
	vmsr	fpscr, ip
#endif

	add	r0, r0, #(_JB_REG_R4 * 4)
	/* Restore integer registers */
#ifndef __thumb__
	ldmia	r0, {r4-r14}
#else
	ldmia	r0, {r4-r12}
	ldr	r13, [r0, #((_JB_REG_R13 - _JB_REG_R4) * 4)]
	ldr	r14, [r0, #((_JB_REG_R14 - _JB_REG_R4) * 4)]
#endif

	/* Validate sp and r14 */
	teq	sp, #0
	it	ne
	teqne	r14, #0
	it	eq
	beq	.Lbotch

	/* Set return value */
	movs	r0, r1
	it	eq
	moveq	r0, #0x00000001
	RET

	/* validation failed, die die die. */
.Lbotch:
	bl	PIC_SYM(_C_LABEL(longjmperror), PLT)
	bl	PIC_SYM(_C_LABEL(abort), PLT)
1:	b	1b		/* Cannot get here */
END(__longjmp)

	.section .note.GNU-stack,"",%progbits
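
Note (illustrative, not part of the patch): setjmp/longjmp differ from _setjmp/_longjmp
only in the sigprocmask calls wrapped around the register save and restore. In C terms the
two calls made by the assembly are roughly the following sketch, where "slot" stands for
the sigset_t kept in the _JB_SIGMASK words of the jmp_buf:

	#include <signal.h>
	#include <stddef.h>

	/* setjmp prologue: set == NULL, so the call only fetches the current
	 * mask into the jmp_buf (r0 = 1 is SIG_BLOCK in the assembly). */
	static void
	save_signal_mask(sigset_t *slot)
	{
		sigprocmask(SIG_BLOCK, NULL, slot);
	}

	/* __longjmp: reinstall the saved mask before restoring the registers
	 * (r0 = 3 is SIG_SETMASK). */
	static void
	restore_signal_mask(const sigset_t *slot)
	{
		sigprocmask(SIG_SETMASK, slot, NULL);
	}
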
diff --git a/lib/libsys/arm/__vdso_gettc.c b/lib/libsys/arm/__vdso_gettc.c
index ea70dec35cd8..cb4bdec1e8ef 100644
--- a/lib/libsys/arm/__vdso_gettc.c
+++ b/lib/libsys/arm/__vdso_gettc.c
@@ -1,88 +1,81 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2015 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
#include
#include
#include
#include
#include
#include
#include "libc_private.h"

-#if __ARM_ARCH >= 6
static inline uint64_t
cp15_cntvct_get(void)
{
	uint64_t reg;

	__asm __volatile("mrrc\tp15, 1, %Q0, %R0, c14" : "=r" (reg));
	return (reg);
}

static inline uint64_t
cp15_cntpct_get(void)
{
	uint64_t reg;

	__asm __volatile("mrrc\tp15, 0, %Q0, %R0, c14" : "=r" (reg));
	return (reg);
}
-#endif

#pragma weak __vdso_gettc
int
__vdso_gettc(const struct vdso_timehands *th, u_int *tc)
{
	if (th->th_algo != VDSO_TH_ALGO_ARM_GENTIM)
		return (ENOSYS);
-#if __ARM_ARCH >= 6
	/*
	 * Userspace gettimeofday() is only enabled on ARMv7 CPUs, but
	 * libc is compiled for ARMv6. Due to clang issues, .arch
	 * armv7-a directive does not work.
	 */
	__asm __volatile(".word\t0xf57ff06f" : : : "memory"); /* isb */
	*tc = th->th_physical == 0 ? cp15_cntvct_get() : cp15_cntpct_get();
	return (0);
-#else
-	*tc = 0;
-	return (ENOSYS);
-#endif
}

#pragma weak __vdso_gettimekeep
int
__vdso_gettimekeep(struct vdso_timekeep **tk)
{
	return (_elf_aux_info(AT_TIMEKEEP, tk, sizeof(*tk)));
}
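
Note (illustrative, not part of the patch): __vdso_gettc() only returns a raw generic-timer
count, truncated into *tc, selecting CNTVCT (virtual) or CNTPCT (physical) according to
th->th_physical. Turning ticks into time is left to the machine-independent vdso code;
assuming the usual struct vdso_timehands fields, that consumer looks roughly like:

	u_int tc, delta;

	if (__vdso_gettc(th, &tc) == 0) {
		/* ticks elapsed since the kernel last updated this timehands */
		delta = (tc - th->th_offset_count) & th->th_counter_mask;
		/* th->th_scale then converts delta into 64.64 fixed-point seconds */
	}
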
diff --git a/lib/msun/arm/fenv.c b/lib/msun/arm/fenv.c
index 9f172d5fd7c9..05b3adb05f81 100644
--- a/lib/msun/arm/fenv.c
+++ b/lib/msun/arm/fenv.c
@@ -1,317 +1,313 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2004 David Schultz
 * Copyright (c) 2013 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#define __fenv_static
#include "fenv.h"

#include

-#if __ARM_ARCH >= 6
-#define FENV_ARMv6
-#endif
-
/* When SOFTFP_ABI is defined we are using the softfp ABI. */
#if defined(__VFP_FP__) && !defined(__ARM_PCS_VFP)
#define SOFTFP_ABI
#endif

#ifndef FENV_MANGLE
/*
 * Hopefully the system ID byte is immutable, so it's valid to use
 * this as a default environment.
 */
const fenv_t __fe_dfl_env = 0;
#endif

/* If this is a non-mangled softfp version special processing is required */
-#if defined(FENV_MANGLE) || !defined(SOFTFP_ABI) || !defined(FENV_ARMv6)
+#if defined(FENV_MANGLE) || !defined(SOFTFP_ABI)

/*
 * The following macros map between the softfloat emulator's flags and
 * the hardware's FPSR. The hardware this file was written for doesn't
 * have rounding control bits, so we stick those in the system ID byte.
 */
#ifndef __ARM_PCS_VFP
#define __set_env(env, flags, mask, rnd) env = ((flags)	\
				| (mask)<<_FPUSW_SHIFT		\
				| (rnd) << 24)
#define __env_flags(env)	((env) & FE_ALL_EXCEPT)
#define __env_mask(env)		(((env) >> _FPUSW_SHIFT)	\
				& FE_ALL_EXCEPT)
#define __env_round(env)	(((env) >> 24) & _ROUND_MASK)
#include "fenv-softfloat.h"
#endif

#ifdef __GNUC_GNU_INLINE__
#error "This file must be compiled with C99 'inline' semantics"
#endif

extern inline int feclearexcept(int __excepts);
extern inline int fegetexceptflag(fexcept_t *__flagp, int __excepts);
extern inline int fesetexceptflag(const fexcept_t *__flagp, int __excepts);
extern inline int feraiseexcept(int __excepts);
extern inline int fetestexcept(int __excepts);
extern inline int fegetround(void);
extern inline int fesetround(int __round);
extern inline int fegetenv(fenv_t *__envp);
extern inline int feholdexcept(fenv_t *__envp);
extern inline int fesetenv(const fenv_t *__envp);
extern inline int feupdateenv(const fenv_t *__envp);
extern inline int feenableexcept(int __mask);
extern inline int fedisableexcept(int __mask);
extern inline int fegetexcept(void);

#else /* !FENV_MANGLE && SOFTFP_ABI */

/* Set by libc when the VFP unit is enabled */
extern int _libc_arm_fpu_present;

int __softfp_feclearexcept(int __excepts);
int __softfp_fegetexceptflag(fexcept_t *__flagp, int __excepts);
int __softfp_fesetexceptflag(const fexcept_t *__flagp, int __excepts);
int __softfp_feraiseexcept(int __excepts);
int __softfp_fetestexcept(int __excepts);
int __softfp_fegetround(void);
int __softfp_fesetround(int __round);
int __softfp_fegetenv(fenv_t *__envp);
int __softfp_feholdexcept(fenv_t *__envp);
int __softfp_fesetenv(const fenv_t *__envp);
int __softfp_feupdateenv(const fenv_t *__envp);
int __softfp_feenableexcept(int __mask);
int __softfp_fedisableexcept(int __mask);
int __softfp_fegetexcept(void);

int __vfp_feclearexcept(int __excepts);
int __vfp_fegetexceptflag(fexcept_t *__flagp, int __excepts);
int __vfp_fesetexceptflag(const fexcept_t *__flagp, int __excepts);
int __vfp_feraiseexcept(int __excepts);
int __vfp_fetestexcept(int __excepts);
int __vfp_fegetround(void);
int __vfp_fesetround(int __round);
int __vfp_fegetenv(fenv_t *__envp);
int __vfp_feholdexcept(fenv_t *__envp);
int __vfp_fesetenv(const fenv_t *__envp);
int __vfp_feupdateenv(const fenv_t *__envp);
int __vfp_feenableexcept(int __mask);
int __vfp_fedisableexcept(int __mask);
int __vfp_fegetexcept(void);

static int
__softfp_round_to_vfp(int round)
{
	switch (round) {
	case FE_TONEAREST:
	default:
		return VFP_FE_TONEAREST;
	case FE_TOWARDZERO:
		return VFP_FE_TOWARDZERO;
	case FE_UPWARD:
		return VFP_FE_UPWARD;
	case FE_DOWNWARD:
		return VFP_FE_DOWNWARD;
	}
}

static int
__softfp_round_from_vfp(int round)
{
	switch (round) {
	case VFP_FE_TONEAREST:
	default:
		return FE_TONEAREST;
	case VFP_FE_TOWARDZERO:
		return FE_TOWARDZERO;
	case VFP_FE_UPWARD:
		return FE_UPWARD;
	case VFP_FE_DOWNWARD:
		return FE_DOWNWARD;
	}
}

int feclearexcept(int __excepts)
{
	if (_libc_arm_fpu_present)
		__vfp_feclearexcept(__excepts);
	__softfp_feclearexcept(__excepts);
	return (0);
}

int fegetexceptflag(fexcept_t *__flagp, int __excepts)
{
	fexcept_t __vfp_flagp;

	__vfp_flagp = 0;
	if (_libc_arm_fpu_present)
		__vfp_fegetexceptflag(&__vfp_flagp, __excepts);
	__softfp_fegetexceptflag(__flagp, __excepts);
	*__flagp |= __vfp_flagp;
	return (0);
}

int fesetexceptflag(const fexcept_t *__flagp, int __excepts)
{
	if (_libc_arm_fpu_present)
		__vfp_fesetexceptflag(__flagp, __excepts);
	__softfp_fesetexceptflag(__flagp, __excepts);
	return (0);
}

int feraiseexcept(int __excepts)
{
	if (_libc_arm_fpu_present)
		__vfp_feraiseexcept(__excepts);
	__softfp_feraiseexcept(__excepts);
	return (0);
}

int fetestexcept(int __excepts)
{
	int __got_excepts;

	__got_excepts = 0;
	if (_libc_arm_fpu_present)
		__got_excepts = __vfp_fetestexcept(__excepts);
	__got_excepts |= __softfp_fetestexcept(__excepts);
	return (__got_excepts);
}

int fegetround(void)
{
	if (_libc_arm_fpu_present)
		return __softfp_round_from_vfp(__vfp_fegetround());
	return __softfp_fegetround();
}

int fesetround(int __round)
{
	if (_libc_arm_fpu_present)
		__vfp_fesetround(__softfp_round_to_vfp(__round));
	__softfp_fesetround(__round);
	return (0);
}

int fegetenv(fenv_t *__envp)
{
	fenv_t __vfp_envp;

	__vfp_envp = 0;
	if (_libc_arm_fpu_present)
		__vfp_fegetenv(&__vfp_envp);
	__softfp_fegetenv(__envp);
	*__envp |= __vfp_envp;
	return (0);
}

int feholdexcept(fenv_t *__envp)
{
	fenv_t __vfp_envp;

	__vfp_envp = 0;
	if (_libc_arm_fpu_present)
		__vfp_feholdexcept(&__vfp_envp);
	__softfp_feholdexcept(__envp);
	*__envp |= __vfp_envp;
	return (0);
}

int fesetenv(const fenv_t *__envp)
{
	if (_libc_arm_fpu_present)
		__vfp_fesetenv(__envp);
	__softfp_fesetenv(__envp);
	return (0);
}

int feupdateenv(const fenv_t *__envp)
{
	if (_libc_arm_fpu_present)
		__vfp_feupdateenv(__envp);
	__softfp_feupdateenv(__envp);
	return (0);
}

int feenableexcept(int __mask)
{
	int __unmasked;

	__unmasked = 0;
	if (_libc_arm_fpu_present)
		__unmasked = __vfp_feenableexcept(__mask);
	__unmasked |= __softfp_feenableexcept(__mask);
	return (__unmasked);
}

int fedisableexcept(int __mask)
{
	int __unmasked;

	__unmasked = 0;
	if (_libc_arm_fpu_present)
		__unmasked = __vfp_fedisableexcept(__mask);
	__unmasked |= __softfp_fedisableexcept(__mask);
	return (__unmasked);
}

int fegetexcept(void)
{
	int __unmasked;

	__unmasked = 0;
	if (_libc_arm_fpu_present)
		__unmasked = __vfp_fegetexcept();
	__unmasked |= __softfp_fegetexcept();
	return (__unmasked);
}
#endif
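
Note (illustrative, not part of the patch): under the softfp ABI floating-point code may
run either on the VFP unit or in the softfloat emulator, so every wrapper above pushes the
same change into both copies of the state and merges the flags when reading them back.
From an application's point of view the standard <fenv.h> interface behaves the same either
way, for example:

	#include <fenv.h>
	#include <stdio.h>

	#pragma STDC FENV_ACCESS ON

	int
	main(void)
	{
		volatile double x = 1.0, y = 3.0, q;

		fesetround(FE_UPWARD);		/* updates FPSCR (if VFP is present) and the softfloat state */
		q = x / y;			/* may execute on VFP or in the emulator */
		if (fetestexcept(FE_INEXACT))	/* flags from both units are OR-ed together */
			printf("inexact result: %.17g\n", q);
		fesetround(FE_TONEAREST);
		return (0);
	}
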