diff --git a/sys/arm64/arm64/freebsd32_machdep.c b/sys/arm64/arm64/freebsd32_machdep.c
--- a/sys/arm64/arm64/freebsd32_machdep.c
+++ b/sys/arm64/arm64/freebsd32_machdep.c
@@ -34,6 +34,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -55,6 +56,9 @@
 
 extern void freebsd32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask);
 
+SYSCTL_NODE(_compat, OID_AUTO, arm, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+    "32-bit mode");
+
 /*
  * The first two fields of a ucontext_t are the signal mask and the machine
  * context.  The next field is uc_link; we want to avoid destroying the link
diff --git a/sys/arm64/arm64/support.S b/sys/arm64/arm64/support.S
--- a/sys/arm64/arm64/support.S
+++ b/sys/arm64/arm64/support.S
@@ -32,12 +32,16 @@
 #include
 __FBSDID("$FreeBSD$");
 
+#include
+
 #include
 #include
 #include
 
 #include "assym.inc"
 
+#define	EMUL_SWP_ATTEMPTS	0x04
+
 .macro check_user_access user_arg, limit, bad_addr_func
 	ldr	x7, =(\limit)
 	cmp	x\user_arg, x7
@@ -55,6 +59,72 @@
 	ret
 END(fsu_fault)
 
+#ifdef COMPAT_FREEBSD32
+/* SWP/SWPB atomic failed; return EFAULT. */
+ENTRY(swp_fault)
+	SET_FAULT_HANDLER(xzr, x1)	/* Reset the handler function */
+	EXIT_USER_ACCESS_CHECK(w0, x1)
+	mov	x0, #EFAULT
+	ret
+END(swp_fault)
+
+ENTRY(swp_emulate_atomic)
+	adr	x6, swp_fault		/* Load the fault handler */
+	mov	w5, #0			/* Attempts */
+	SET_FAULT_HANDLER(x6, x4)	/* And set it */
+	ENTER_USER_ACCESS(w6, x4)
+
+	/*
+	 * x1 holds the address of our swap value; we pull the new value from it
+	 * and stash the old value there since the caller will need it.  The
+	 * return value of swp_emulate_atomic() matches store(9)/fetch(9)
+	 * semantics so that we can just re-use the existing fault handler.
+	 */
+	ldr	w7, [x1]
+
+	cmp	x2, #0			/* is_swpb */
+	b.ne	2f
+
+	/* swp */
+1:	ldxr	w2, [x0]		/* Stash the old value in w2 */
+	stxr	w3, w7, [x0]		/* Store new value */
+	cbz	w3, 3f			/* Success? */
+
+	add	w5, w5, #1
+	cmp	w5, #(EMUL_SWP_ATTEMPTS + 1)
+	b.eq	4f			/* EAGAIN */
+	b	1b			/* Try again */
+
+	/* swpb */
+2:	ldxrb	w2, [x0]
+	stxrb	w3, w7, [x0]
+	cbz	w3, 3f
+
+	add	w5, w5, #1
+	cmp	w5, #(EMUL_SWP_ATTEMPTS + 1)
+	b.eq	4f			/* EAGAIN */
+	b	2b			/* Try again */
+
+3:	mov	w0, w2			/* Return the old value */
+	EXIT_USER_ACCESS(w6)
+	SET_FAULT_HANDLER(xzr, x6)
+
+	/*
+	 * w0 has the value to be returned; stash it in *val (32-bit store,
+	 * *val is a uint32_t) and return success.
+	 */
+	str	w0, [x1]
+	mov	x0, #0
+	ret
+
+	/*
+	 * Too many attempts, avoid DoS; tear down user access and the fault
+	 * handler just as the success path does before returning EAGAIN.
+	 */
+4:	EXIT_USER_ACCESS(w6)
+	SET_FAULT_HANDLER(xzr, x6)
+	mov	x0, #EAGAIN
+	ret
+END(swp_emulate_atomic)
+#endif
+
 /*
  * int casueword32_llsc(volatile uint32_t *, uint32_t, uint32_t *, uint32_t)
  */
diff --git a/sys/arm64/arm64/undefined.c b/sys/arm64/arm64/undefined.c
--- a/sys/arm64/arm64/undefined.c
+++ b/sys/arm64/arm64/undefined.c
@@ -39,14 +39,45 @@
 #include
 #include
 #include
+#include
 #include
+#include
 #include
+#include
 #include
 #include
 
+#include
+#include
+
+/* Low bit masked off */
+#define	INSN_COND(insn)		((insn >> 28) & ~0x1)
+#define	INSN_COND_INVERTED(insn)	((insn >> 28) & 0x1)
+#define	INSN_COND_EQ		0x00	/* NE */
+#define	INSN_COND_CS		0x02	/* CC */
+#define	INSN_COND_MI		0x04	/* PL */
+#define	INSN_COND_VS		0x06	/* VC */
+#define	INSN_COND_HI		0x08	/* LS */
+#define	INSN_COND_GE		0x0a	/* LT */
+#define	INSN_COND_GT		0x0c	/* LE */
+#define	INSN_COND_AL		0x0e	/* Always */
+
 MALLOC_DEFINE(M_UNDEF, "undefhandler", "Undefined instruction handler data");
 
+#ifdef COMPAT_FREEBSD32
+#ifndef EMUL_SWP
+#define	EMUL_SWP	0
+#endif
+
+SYSCTL_DECL(_compat_arm);
+
+static bool compat32_emul_swp = EMUL_SWP;
+SYSCTL_BOOL(_compat_arm, OID_AUTO, emul_swp,
+    CTLFLAG_RWTUN | CTLFLAG_MPSAFE, &compat32_emul_swp, 0,
+    "Enable SWP/SWPB emulation");
+#endif
+
 struct undef_handler {
 	LIST_ENTRY(undef_handler) uh_link;
 	undef_handler_t		uh_handler;
@@ -88,6 +119,54 @@
 	return (0);
 }
 
+static bool
+arm_cond_match(uint32_t insn, struct trapframe *frame)
+{
+	uint64_t spsr;
+	uint32_t cond;
+	bool invert;
+	bool match;
+
+	/*
+	 * Generally based on the function of the same name in NetBSD, though
+	 * condition bits left in their original position rather than shifting
+	 * over the low bit that indicates inversion for quicker sanity checking
+	 * against spec.
+	 */
+	spsr = frame->tf_spsr;
+	cond = INSN_COND(insn);
+	invert = INSN_COND_INVERTED(insn);
+
+	switch (cond) {
+	case INSN_COND_EQ:
+		match = (spsr & PSR_Z) != 0;
+		break;
+	case INSN_COND_CS:
+		match = (spsr & PSR_C) != 0;
+		break;
+	case INSN_COND_MI:
+		match = (spsr & PSR_N) != 0;
+		break;
+	case INSN_COND_VS:
+		match = (spsr & PSR_V) != 0;
+		break;
+	case INSN_COND_HI:
+		match = (spsr & (PSR_C | PSR_Z)) == PSR_C;
+		break;
+	case INSN_COND_GE:
+		match = (!(spsr & PSR_N) == !(spsr & PSR_V));
+		break;
+	case INSN_COND_GT:
+		match = !(spsr & PSR_Z) && (!(spsr & PSR_N) == !(spsr & PSR_V));
+		break;
+	default:	/* INSN_COND_AL; low bit is masked off above */
+		match = true;
+		break;
+	}
+
+	return (!match != !invert);
+}
+
 #ifdef COMPAT_FREEBSD32
 /* arm32 GDB breakpoints */
 #define GDB_BREAKPOINT	0xe6000011
@@ -113,6 +192,78 @@
 	}
 	return 0;
 }
+
+extern int swp_emulate_atomic(vm_offset_t addr, uint32_t *val, bool is_swpb);
+
+static int
+swp_emulate(vm_offset_t va, uint32_t insn, struct trapframe *frame,
+    uint32_t esr)
+{
+	ksiginfo_t ksi;
+	struct thread *td;
+	vm_offset_t vaddr;
+	uint64_t *regs;
+	uint32_t val;
+	int error, Rn, Rd, Rm;
+	bool is_swpb;
+
+	td = curthread;
+
+	/*
+	 * swp, swpb only; there are no Thumb swp/swpb instructions so we can
+	 * safely bail out if we're in Thumb mode.
+	 */
+	if (!compat32_emul_swp || !SV_PROC_FLAG(td->td_proc, SV_ILP32) ||
+	    (frame->tf_spsr & PSR_T) != 0)
+		return (0);
+	else if ((insn & 0x0fb00ff0) != 0x01000090)
+		return (0);
+	else if (!arm_cond_match(insn, frame))
+		goto next;	/* Handled, but does nothing */
+
+	Rn = (insn & 0xf0000) >> 16;
+	Rd = (insn & 0xf000) >> 12;
+	Rm = (insn & 0xf);
+
+	regs = frame->tf_x;
+	vaddr = regs[Rn] & 0xffffffff;
+	val = regs[Rm];
+
+	/* Enforce alignment for swp. */
+	is_swpb = (insn & 0x00400000) != 0;
+	if (!is_swpb && (vaddr & 3) != 0)
+		goto fault;
+
+	do {
+		error = swp_emulate_atomic(vaddr, &val, is_swpb);
+		if (error == EFAULT)
+			goto fault;
+		MPASS(error == 0 || error == EAGAIN);
+
+		/*
+		 * Avoid potential DoS, e.g., on CPUs that don't implement
+		 * global monitors.
+		 */
+		if (error == EAGAIN)
+			maybe_yield();
+	} while (error != 0);
+
+	regs[Rd] = val;
+
+next:
+	/* No thumb SWP/SWPB; always advance past one 4-byte A32 insn. */
+	frame->tf_elr += 4;
+
+	return (1);
+fault:
+	ksiginfo_init_trap(&ksi);
+	ksi.ksi_signo = SIGSEGV;
+	ksi.ksi_code = SEGV_MAPERR;
+	ksi.ksi_addr = (void *)va;
+	trapsignal(td, &ksi);
+
+	return (1);
+}
 #endif
 
 void
@@ -125,6 +276,7 @@
 	install_undef_handler(false, id_aa64mmfr2_handler);
 #ifdef COMPAT_FREEBSD32
 	install_undef_handler(true, gdb_trapper);
+	install_undef_handler(true, swp_emulate);
 #endif
 }
 
diff --git a/sys/arm64/conf/GENERIC b/sys/arm64/conf/GENERIC
--- a/sys/arm64/conf/GENERIC
+++ b/sys/arm64/conf/GENERIC
@@ -20,6 +20,8 @@
 cpu		ARM64
 ident		GENERIC
 
+options 	EMUL_SWP
+
 include	"std.arm64"
 include	"std.dev"
 
diff --git a/sys/conf/options.arm64 b/sys/conf/options.arm64
--- a/sys/conf/options.arm64
+++ b/sys/conf/options.arm64
@@ -14,6 +14,8 @@
 
 # Binary compatibility
 COMPAT_FREEBSD32	opt_global.h
+# Emulate SWP/SWPB for COMPAT_FREEBSD32
+EMUL_SWP		opt_global.h
 
 # EFI Runtime services support
 EFIRT			opt_efirt.h