diff --git a/sys/arm64/arm64/bus_space_asm.S b/sys/arm64/arm64/bus_space_asm.S index bc9b41f96952..699a27bedab4 100644 --- a/sys/arm64/arm64/bus_space_asm.S +++ b/sys/arm64/arm64/bus_space_asm.S @@ -1,477 +1,481 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ +#include #include + ENTRY(generic_bs_r_1) ldrb w0, [x1, x2] ret END(generic_bs_r_1) ENTRY(generic_bs_r_2) ldrh w0, [x1, x2] ret END(generic_bs_r_2) ENTRY(generic_bs_r_4) ldr w0, [x1, x2] ret END(generic_bs_r_4) ENTRY(generic_bs_r_8) ldr x0, [x1, x2] ret END(generic_bs_r_8) ENTRY(generic_bs_rm_1) /* If there is anything to read. */ cbz x4, 2f /* Calculate the device address. */ add x0, x1, x2 /* * x0 = The device address. * x3 = The kernel address. * x4 = Count */ /* Read the data. */ 1: ldrb w1, [x0] strb w1, [x3], #1 subs x4, x4, #1 b.ne 1b 2: ret END(generic_bs_rm_1) ENTRY(generic_bs_rm_2) /* If there is anything to read. */ cbz x4, 2f /* Calculate the device address. */ add x0, x1, x2 /* * x0 = The device address. * x3 = The kernel address. * x4 = Count */ /* Read the data. */ 1: ldrh w1, [x0] strh w1, [x3], #2 subs x4, x4, #1 b.ne 1b 2: ret END(generic_bs_rm_2) ENTRY(generic_bs_rm_4) /* If there is anything to read. */ cbz x4, 2f /* Calculate the device address. */ add x0, x1, x2 /* * x0 = The device address. * x3 = The kernel address. * x4 = Count */ /* Read the data. */ 1: ldr w1, [x0] str w1, [x3], #4 subs x4, x4, #1 b.ne 1b 2: ret END(generic_bs_rm_4) ENTRY(generic_bs_rm_8) /* If there is anything to read. */ cbz x4, 2f /* Calculate the device address. */ add x0, x1, x2 /* * x0 = The device address. * x3 = The kernel address. * x4 = Count */ /* Read the data. */ 1: ldr x1, [x0] str x1, [x3], #8 subs x4, x4, #1 b.ne 1b 2: ret END(generic_bs_rm_8) ENTRY(generic_bs_rr_1) /* Is there is anything to read. */ cbz x4, 2f /* Calculate the device address. */ add x0, x1, x2 /* * x0 = The device address. * x3 = The kernel address. * x4 = Count */ /* Read the data. */ 1: ldrb w1, [x0], #1 strb w1, [x3], #1 subs x4, x4, #1 b.ne 1b 2: ret END(generic_bs_rr_1) ENTRY(generic_bs_rr_2) /* Is there is anything to read. */ cbz x4, 2f /* Calculate the device address. */ add x0, x1, x2 /* * x0 = The device address. * x3 = The kernel address. * x4 = Count */ /* Read the data. 
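The routines above implement the bus_space(9) read family: the generic_bs_rm_* ("read multiple") loops keep the device address fixed and repeatedly read the same register, while the generic_bs_rr_* ("read region") loops post-increment the device address and walk a range of registers. A rough C illustration of the two loop shapes, not the kernel code itself (the function names here are made up):

#include <stddef.h>
#include <stdint.h>

/* "Read multiple": poll a single device register, e.g. drain a FIFO
 * (the device pointer, x0 in the assembly, never moves). */
static void
read_multi_1(volatile uint8_t *dev, uint8_t *buf, size_t count)
{
        while (count-- > 0)
                *buf++ = *dev;
}

/* "Read region": copy a range of device registers (x0 post-increments). */
static void
read_region_1(volatile uint8_t *dev, uint8_t *buf, size_t count)
{
        while (count-- > 0)
                *buf++ = *dev++;
}

The write-side wm/wr variants later in the file mirror the same distinction in the other direction.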
*/ 1: ldrh w1, [x0], #2 strh w1, [x3], #2 subs x4, x4, #1 b.ne 1b 2: ret END(generic_bs_rr_2) ENTRY(generic_bs_rr_4) /* Is there is anything to read. */ cbz x4, 2f /* Calculate the device address. */ add x0, x1, x2 /* * x0 = The device address. * x3 = The kernel address. * x4 = Count */ /* Read the data. */ 1: ldr w1, [x0], #4 str w1, [x3], #4 subs x4, x4, #1 b.ne 1b 2: ret END(generic_bs_rr_4) ENTRY(generic_bs_rr_8) /* Is there is anything to read. */ cbz x4, 2f /* Calculate the device address. */ add x0, x1, x2 /* * x0 = The device address. * x3 = The kernel address. * x4 = Count */ /* Read the data. */ 1: ldr x1, [x0], #8 str x1, [x3], #8 subs x4, x4, #1 b.ne 1b 2: ret END(generic_bs_rr_8) ENTRY(generic_bs_w_1) strb w3, [x1, x2] ret END(generic_bs_w_1) ENTRY(generic_bs_w_2) strh w3, [x1, x2] ret END(generic_bs_w_2) ENTRY(generic_bs_w_4) str w3, [x1, x2] ret END(generic_bs_w_4) ENTRY(generic_bs_w_8) str x3, [x1, x2] ret END(generic_bs_w_8) ENTRY(generic_bs_wm_1) /* If there is anything to write. */ cbz x4, 2f add x0, x1, x2 /* * x0 = The device address. * x3 = The kernel address. * x4 = Count */ /* Write the data */ 1: ldrb w1, [x3], #1 strb w1, [x0] subs x4, x4, #1 b.ne 1b 2: ret END(generic_bs_wm_1) ENTRY(generic_bs_wm_2) /* If there is anything to write. */ cbz x4, 2f add x0, x1, x2 /* * x0 = The device address. * x3 = The kernel address. * x4 = Count */ /* Write the data */ 1: ldrh w1, [x3], #2 strh w1, [x0] subs x4, x4, #1 b.ne 1b 2: ret END(generic_bs_wm_2) ENTRY(generic_bs_wm_4) /* If there is anything to write. */ cbz x4, 2f add x0, x1, x2 /* * x0 = The device address. * x3 = The kernel address. * x4 = Count */ /* Write the data */ 1: ldr w1, [x3], #4 str w1, [x0] subs x4, x4, #1 b.ne 1b 2: ret END(generic_bs_wm_4) ENTRY(generic_bs_wm_8) /* If there is anything to write. */ cbz x4, 2f add x0, x1, x2 /* * x0 = The device address. * x3 = The kernel address. * x4 = Count */ /* Write the data */ 1: ldr x1, [x3], #8 str x1, [x0] subs x4, x4, #1 b.ne 1b 2: ret END(generic_bs_wm_8) ENTRY(generic_bs_wr_1) /* Is there is anything to write. */ cbz x4, 2f add x0, x1, x2 /* * x0 = The device address. * x3 = The kernel address. * x4 = Count */ /* Write the data */ 1: ldrb w1, [x3], #1 strb w1, [x0], #1 subs x4, x4, #1 b.ne 1b 2: ret END(generic_bs_wr_1) ENTRY(generic_bs_wr_2) /* Is there is anything to write. */ cbz x4, 2f add x0, x1, x2 /* * x0 = The device address. * x3 = The kernel address. * x4 = Count */ /* Write the data */ 1: ldrh w1, [x3], #2 strh w1, [x0], #2 subs x4, x4, #1 b.ne 1b 2: ret END(generic_bs_wr_2) ENTRY(generic_bs_wr_4) /* Is there is anything to write. */ cbz x4, 2f add x0, x1, x2 /* * x0 = The device address. * x3 = The kernel address. * x4 = Count */ /* Write the data */ 1: ldr w1, [x3], #4 str w1, [x0], #4 subs x4, x4, #1 b.ne 1b 2: ret END(generic_bs_wr_4) ENTRY(generic_bs_wr_8) /* Is there is anything to write. */ cbz x4, 2f add x0, x1, x2 /* * x0 = The device address. * x3 = The kernel address. 
* x4 = Count */ /* Write the data */ 1: ldr x1, [x3], #8 str x1, [x0], #8 subs x4, x4, #1 b.ne 1b 2: ret END(generic_bs_wr_8) ENTRY(generic_bs_fault) mov x0, #-1 ret END(generic_bs_fault) ENTRY(generic_bs_peek_1) .globl generic_bs_peek_1f generic_bs_peek_1f: ldrb w0, [x1, x2] /* Checked instruction */ dsb sy strb w0,[x3] mov x0, #0 ret END(generic_bs_peek_1) ENTRY(generic_bs_peek_2) .globl generic_bs_peek_2f generic_bs_peek_2f: ldrh w0, [x1, x2] /* Checked instruction */ dsb sy strh w0,[x3] mov x0, #0 ret END(generic_bs_peek_2) ENTRY(generic_bs_peek_4) .globl generic_bs_peek_4f generic_bs_peek_4f: ldr w0, [x1, x2] /* Checked instruction */ dsb sy str w0,[x3] mov x0, #0 ret END(generic_bs_peek_4) ENTRY(generic_bs_peek_8) .globl generic_bs_peek_8f generic_bs_peek_8f: ldr x0, [x1, x2] /* Checked instruction */ dsb sy str x0,[x3] mov x0, #0 ret END(generic_bs_peek_8) ENTRY(generic_bs_poke_1) .globl generic_bs_poke_1f generic_bs_poke_1f: strb w3, [x1, x2] /* Checked instruction */ dsb sy mov x0, #0 ret END(generic_bs_poke_1) ENTRY(generic_bs_poke_2) .globl generic_bs_poke_2f generic_bs_poke_2f: strh w3, [x1, x2] /* Checked instruction */ dsb sy mov x0, #0 ret END(generic_bs_poke_2) ENTRY(generic_bs_poke_4) .globl generic_bs_poke_4f generic_bs_poke_4f: str w3, [x1, x2] /* Checked instruction */ dsb sy mov x0, #0 ret END(generic_bs_poke_4) ENTRY(generic_bs_poke_8) .globl generic_bs_poke_8f generic_bs_poke_8f: str x3, [x1, x2] /* Checked instruction */ dsb sy mov x0, #0 ret END(generic_bs_poke_8) + +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/arm64/arm64/copyinout.S b/sys/arm64/arm64/copyinout.S index 23f56ae85daa..26dd0b4cf14f 100644 --- a/sys/arm64/arm64/copyinout.S +++ b/sys/arm64/arm64/copyinout.S @@ -1,222 +1,225 @@ /*- * Copyright (c) 2015 The FreeBSD Foundation * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ -#include +#include #include +#include #include #include #include "assym.inc" .macro check_user_access user_arg, size_arg, bad_access_func adds x6, x\user_arg, x\size_arg b.cs \bad_access_func ldr x7, =VM_MAXUSER_ADDRESS cmp x6, x7 b.hi \bad_access_func .endm /* * Fault handler for the copy{in,out} functions below. 
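The GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) line added at the end of bus_space_asm.S above, and at the end of every other assembly file in this change, emits a .note.gnu.property entry so the linker can see that the object was built with BTI/PAC in mind; the new #include added at the top of each file pulls in the macro and the GNU property constants. As a hedged sketch (the field values below are the standard GNU property constants, not copied from the FreeBSD macro), the emitted note has roughly this layout:

#include <stdint.h>

/* Illustrative mirror of the emitted ELF note. */
struct aarch64_feature_1_note {
        uint32_t namesz;        /* 4: "GNU" plus the terminating NUL */
        uint32_t descsz;        /* 16: one property, padded to 8 bytes */
        uint32_t type;          /* 5: NT_GNU_PROPERTY_TYPE_0 */
        char     name[4];       /* "GNU" */
        uint32_t pr_type;       /* 0xc0000000: GNU_PROPERTY_AARCH64_FEATURE_1_AND */
        uint32_t pr_datasz;     /* 4 */
        uint32_t pr_data;       /* feature bits: BTI is bit 0, PAC is bit 1 */
        uint32_t pr_pad;        /* pad the descriptor to 8-byte alignment */
};

Because the AArch64 feature property is an AND property, the linker only marks the final image as BTI/PAC capable when every linked object carries the note, which is why each hand-written .S file needs the invocation.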
*/ ENTRY(copyio_fault) SET_FAULT_HANDLER(xzr, x1) /* Clear the handler */ EXIT_USER_ACCESS_CHECK(w0, x1) copyio_fault_nopcb: mov x0, #EFAULT ret END(copyio_fault) /* * Copies from a kernel to user address * * int copyout(const void *kaddr, void *udaddr, size_t len) */ ENTRY(copyout) cbz x2, 1f check_user_access 1, 2, copyio_fault_nopcb b copycommon 1: mov x0, xzr /* return 0 */ ret END(copyout) /* * Copies from a user to kernel address * * int copyin(const void *uaddr, void *kdaddr, size_t len) */ ENTRY(copyin) cbz x2, 1f check_user_access 0, 2, copyio_fault_nopcb b copycommon 1: mov x0, xzr /* return 0 */ ret END(copyin) /* * Copies a string from a user to kernel address * * int copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *done) */ ENTRY(copyinstr) mov x5, xzr /* count = 0 */ mov w4, #1 /* If zero return faulure */ cbz x2, 3f /* If len == 0 then skip loop */ adr x6, copyio_fault /* Get the handler address */ SET_FAULT_HANDLER(x6, x7) /* Set the handler */ ldr x7, =VM_MAXUSER_ADDRESS 1: cmp x0, x7 b.cs copyio_fault ldtrb w4, [x0] /* Load from uaddr */ add x0, x0, #1 /* Next char */ strb w4, [x1], #1 /* Store in kaddr */ add x5, x5, #1 /* count++ */ cbz w4, 2f /* Break when NUL-terminated */ sub x2, x2, #1 /* len-- */ cbnz x2, 1b 2: SET_FAULT_HANDLER(xzr, x7) /* Clear the handler */ 3: cbz x3, 4f /* Check if done != NULL */ str x5, [x3] /* done = count */ 4: mov w1, #ENAMETOOLONG /* Load ENAMETOOLONG to return if failed */ cmp w4, #0 /* Check if we saved the NUL-terminator */ csel w0, wzr, w1, eq /* If so return success, else failure */ ret END(copyinstr) /* * Local helper * * x0 - src pointer * x1 - dst pointer * x2 - size * lr - the return address, so jump here instead of calling * * This function is optimized to minimize concurrent memory accesses. In * present form it is suited for cores with a single memory prefetching * unit. * ARM64TODO: * Consider using separate functions for each ARM64 core. Adding memory * access interleaving might increase a total throughput on A57 or A72. */ .text .align 4 .local copycommon .type copycommon,@function copycommon: adr x6, copyio_fault /* Get the handler address */ SET_FAULT_HANDLER(x6, x7) /* Set the handler */ ENTER_USER_ACCESS(w6, x7) /* Check alignment */ orr x3, x0, x1 ands x3, x3, 0x07 b.eq aligned /* Unaligned is byte by byte copy */ byte_by_byte: ldrb w3, [x0], #0x01 strb w3, [x1], #0x01 subs x2, x2, #0x01 b.ne byte_by_byte b ending aligned: cmp x2, #0x10 b.lt lead_out cmp x2, #0x40 b.lt by_dwords_start /* Block copy */ lsr x15, x2, #0x06 by_blocks: ldp x3, x4, [x0], #0x10 ldp x5, x6, [x0], #0x10 ldp x7, x8, [x0], #0x10 ldp x9, x10, [x0], #0x10 stp x3, x4, [x1], #0x10 stp x5, x6, [x1], #0x10 stp x7, x8, [x1], #0x10 stp x9, x10, [x1], #0x10 subs x15, x15, #0x01 b.ne by_blocks and x2, x2, #0x3f by_dwords_start: lsr x15, x2, #0x04 cbz x15, lead_out by_dwords: ldp x3, x4, [x0], #0x10 stp x3, x4, [x1], #0x10 subs x15, x15, #0x01 b.ne by_dwords /* Less than 16 bytes to copy */ lead_out: tbz x2, #0x03, last_word ldr x3, [x0], #0x08 str x3, [x1], #0x08 last_word: tbz x2, #0x02, last_hword ldr w3, [x0], #0x04 str w3, [x1], #0x04 last_hword: tbz x2, #0x01, last_byte ldrh w3, [x0], #0x02 strh w3, [x1], #0x02 last_byte: tbz x2, #0x00, ending ldrb w3, [x0] strb w3, [x1] ending: EXIT_USER_ACCESS_CHECK(w6, x7) SET_FAULT_HANDLER(xzr, x7) /* Clear the handler */ mov x0, xzr /* return 0 */ ret .size copycommon, . 
- copycommon + +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/arm64/arm64/cpufunc_asm.S b/sys/arm64/arm64/cpufunc_asm.S index 5a668aeb542e..a13b97c2cdee 100644 --- a/sys/arm64/arm64/cpufunc_asm.S +++ b/sys/arm64/arm64/cpufunc_asm.S @@ -1,192 +1,196 @@ /*- * Copyright (c) 2014 Robin Randhawa * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Andrew Turner * under sponsorship from the FreeBSD Foundation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ +#include #include + #include #include #include "assym.inc" /* * FIXME: * Need big.LITTLE awareness at some point. * Using arm64_p[id]cache_line_size may not be the best option. * Need better SMP awareness. */ .text .align 2 .Lpage_mask: .word PAGE_MASK /* * Macro to handle the cache. This takes the start address in x0, length * in x1. It will corrupt x0, x1, x2, x3, and x4. 
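In C terms, the cache_handle_range macro that follows does roughly this (an illustrative sketch assuming the line size is a power of two; the real macro issues dc/ic instructions plus the trailing barriers rather than calling anything):

#include <stddef.h>
#include <stdint.h>

static void
cache_op_range(uintptr_t va, size_t len, size_t line_size)
{
        size_t mask = line_size - 1;

        len += va & mask;               /* fold the unaligned start into the length */
        va &= ~(uintptr_t)mask;         /* align down to a cache-line boundary */
        while (len > 0) {
                /* dc <dcop>, va -- one cache line's worth */
                va += line_size;
                len = (len > line_size) ? len - line_size : 0;
        }
        /* dsb ish (plus the ic loop and isb for the instruction-cache case) */
}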
*/ .macro cache_handle_range dcop = 0, ic = 0, icop = 0 .if \ic == 0 adrp x3, dcache_line_size /* Load the D cache line size */ ldr x3, [x3, :lo12:dcache_line_size] .else adrp x3, idcache_line_size /* Load the I & D cache line size */ ldr x3, [x3, :lo12:idcache_line_size] .endif sub x4, x3, #1 /* Get the address mask */ and x2, x0, x4 /* Get the low bits of the address */ add x1, x1, x2 /* Add these to the size */ bic x0, x0, x4 /* Clear the low bit of the address */ .if \ic != 0 mov x2, x0 /* Save the address */ mov x4, x1 /* Save the size */ .endif 1: dc \dcop, x0 add x0, x0, x3 /* Move to the next line */ subs x1, x1, x3 /* Reduce the size */ b.hi 1b /* Check if we are done */ dsb ish .if \ic != 0 2: ic \icop, x2 add x2, x2, x3 /* Move to the next line */ subs x4, x4, x3 /* Reduce the size */ b.hi 2b /* Check if we are done */ dsb ish isb .endif .endm ENTRY(arm64_nullop) ret END(arm64_nullop) /* * Generic functions to read/modify/write the internal coprocessor registers */ ENTRY(arm64_tlb_flushID) dsb ishst #ifdef SMP tlbi vmalle1is #else tlbi vmalle1 #endif dsb ish isb ret END(arm64_tlb_flushID) /* * void arm64_dcache_wb_range(void *, vm_size_t) */ ENTRY(arm64_dcache_wb_range) cache_handle_range dcop = cvac ret END(arm64_dcache_wb_range) /* * void arm64_dcache_wbinv_range(void *, vm_size_t) */ ENTRY(arm64_dcache_wbinv_range) cache_handle_range dcop = civac ret END(arm64_dcache_wbinv_range) /* * void arm64_dcache_inv_range(void *, vm_size_t) * * Note, we must not invalidate everything. If the range is too big we * must use wb-inv of the entire cache. */ ENTRY(arm64_dcache_inv_range) cache_handle_range dcop = ivac ret END(arm64_dcache_inv_range) /* * void arm64_dic_idc_icache_sync_range(void *, vm_size_t) * When the CTR_EL0.IDC bit is set cleaning to PoU becomes a dsb. * When the CTR_EL0.DIC bit is set icache invalidation becomes an isb. */ ENTRY(arm64_dic_idc_icache_sync_range) dsb ishst isb ret END(arm64_dic_idc_icache_sync_range) /* * void arm64_idc_aliasing_icache_sync_range(void *, vm_size_t) * When the CTR_EL0.IDC bit is set cleaning to PoU becomes a dsb. */ ENTRY(arm64_idc_aliasing_icache_sync_range) dsb ishst ic ialluis dsb ish isb ret END(arm64_idc_aliasing_icache_sync_range) /* * void arm64_aliasing_icache_sync_range(void *, vm_size_t) */ ENTRY(arm64_aliasing_icache_sync_range) /* * XXX Temporary solution - I-cache flush should be range based for * PIPT cache or IALLUIS for VIVT or VIPT caches */ /* cache_handle_range dcop = cvau, ic = 1, icop = ivau */ cache_handle_range dcop = cvau ic ialluis dsb ish isb ret END(arm64_aliasing_icache_sync_range) /* * int arm64_icache_sync_range_checked(void *, vm_size_t) */ ENTRY(arm64_icache_sync_range_checked) adr x5, cache_maint_fault SET_FAULT_HANDLER(x5, x6) /* XXX: See comment in arm64_icache_sync_range */ cache_handle_range dcop = cvau ic ialluis dsb ish isb SET_FAULT_HANDLER(xzr, x6) mov x0, #0 ret END(arm64_icache_sync_range_checked) ENTRY(cache_maint_fault) SET_FAULT_HANDLER(xzr, x1) mov x0, #EFAULT ret END(cache_maint_fault) + +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/arm64/arm64/exception.S b/sys/arm64/arm64/exception.S index 41d7e7f7ae1f..662684588e0c 100644 --- a/sys/arm64/arm64/exception.S +++ b/sys/arm64/arm64/exception.S @@ -1,326 +1,329 @@ /*- * Copyright (c) 2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ +#include + #include #include #include "assym.inc" .text /* * This is limited to 28 instructions as it's placed in the exception vector * slot that is 32 instructions long. We need one for the branch, and three * for the prologue. */ .macro save_registers_head el .if \el == 1 mov x18, sp stp x0, x1, [sp, #(TF_X - TF_SIZE - 128)]! .else stp x0, x1, [sp, #(TF_X - TF_SIZE)]! .endif stp x2, x3, [sp, #(2 * 8)] stp x4, x5, [sp, #(4 * 8)] stp x6, x7, [sp, #(6 * 8)] stp x8, x9, [sp, #(8 * 8)] stp x10, x11, [sp, #(10 * 8)] stp x12, x13, [sp, #(12 * 8)] stp x14, x15, [sp, #(14 * 8)] stp x16, x17, [sp, #(16 * 8)] stp x18, x19, [sp, #(18 * 8)] stp x20, x21, [sp, #(20 * 8)] stp x22, x23, [sp, #(22 * 8)] stp x24, x25, [sp, #(24 * 8)] stp x26, x27, [sp, #(26 * 8)] stp x28, x29, [sp, #(28 * 8)] .if \el == 0 mrs x18, sp_el0 .endif mrs x10, elr_el1 mrs x11, spsr_el1 mrs x12, esr_el1 mrs x13, far_el1 stp x18, lr, [sp, #(TF_SP - TF_X)]! stp x10, x11, [sp, #(TF_ELR)] stp x12, x13, [sp, #(TF_ESR)] mrs x18, tpidr_el1 .endm .macro save_registers el add x29, sp, #(TF_SIZE) .if \el == 0 #if defined(PERTHREAD_SSP) /* Load the SSP canary to sp_el0 */ ldr x1, [x18, #(PC_CURTHREAD)] add x1, x1, #(TD_MD_CANARY) msr sp_el0, x1 #endif /* Apply the SSBD (CVE-2018-3639) workaround if needed */ ldr x1, [x18, #PC_SSBD] cbz x1, 1f mov w0, #1 blr x1 1: ldr x0, [x18, #PC_CURTHREAD] bl ptrauth_exit_el0 ldr x0, [x18, #(PC_CURTHREAD)] bl dbg_monitor_enter /* Unmask debug and SError exceptions */ msr daifclr, #(DAIF_D | DAIF_A) .else /* * Unmask debug and SError exceptions. * For EL1, debug exceptions are conditionally unmasked in * do_el1h_sync(). */ msr daifclr, #(DAIF_A) .endif .endm .macro restore_registers el /* * Mask all exceptions, x18 may change in the interrupt exception * handler. 
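The SSBD (CVE-2018-3639) workaround above is driven through PC_SSBD, a per-CPU function pointer installed by the CPU errata code; the exact callback signature is assumed here for illustration only. In rough C terms:

/* Hypothetical sketch, not the kernel API. */
extern void (*ssbd_hook)(int enable);   /* what PC_SSBD points at, roughly */

static void
ssbd_on_kernel_entry(void)
{
        if (ssbd_hook != NULL)
                ssbd_hook(1);           /* entering from EL0: enable the mitigation */
}

static void
ssbd_on_kernel_exit(void)
{
        if (ssbd_hook != NULL)
                ssbd_hook(0);           /* returning to EL0: disable it again */
}

The matching "remove the workaround" call sits in the restore_registers path below.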
*/ msr daifset, #(DAIF_ALL) .if \el == 0 ldr x0, [x18, #PC_CURTHREAD] mov x1, sp bl dbg_monitor_exit ldr x0, [x18, #PC_CURTHREAD] bl ptrauth_enter_el0 /* Remove the SSBD (CVE-2018-3639) workaround if needed */ ldr x1, [x18, #PC_SSBD] cbz x1, 1f mov w0, #0 blr x1 1: .endif ldp x18, lr, [sp, #(TF_SP)] ldp x10, x11, [sp, #(TF_ELR)] .if \el == 0 msr sp_el0, x18 .endif msr spsr_el1, x11 msr elr_el1, x10 ldp x0, x1, [sp, #(TF_X + 0 * 8)] ldp x2, x3, [sp, #(TF_X + 2 * 8)] ldp x4, x5, [sp, #(TF_X + 4 * 8)] ldp x6, x7, [sp, #(TF_X + 6 * 8)] ldp x8, x9, [sp, #(TF_X + 8 * 8)] ldp x10, x11, [sp, #(TF_X + 10 * 8)] ldp x12, x13, [sp, #(TF_X + 12 * 8)] ldp x14, x15, [sp, #(TF_X + 14 * 8)] ldp x16, x17, [sp, #(TF_X + 16 * 8)] .if \el == 0 /* * We only restore the callee saved registers when returning to * userland as they may have been updated by a system call or signal. */ ldp x18, x19, [sp, #(TF_X + 18 * 8)] ldp x20, x21, [sp, #(TF_X + 20 * 8)] ldp x22, x23, [sp, #(TF_X + 22 * 8)] ldp x24, x25, [sp, #(TF_X + 24 * 8)] ldp x26, x27, [sp, #(TF_X + 26 * 8)] ldp x28, x29, [sp, #(TF_X + 28 * 8)] .else ldr x29, [sp, #(TF_X + 29 * 8)] .endif .if \el == 0 add sp, sp, #(TF_SIZE) .else mov sp, x18 mrs x18, tpidr_el1 .endif .endm .macro do_ast mrs x19, daif /* Make sure the IRQs are enabled before calling ast() */ bic x19, x19, #PSR_I 1: /* * Mask interrupts while checking the ast pending flag */ msr daifset, #(DAIF_INTR) /* Read the current thread AST mask */ ldr x1, [x18, #PC_CURTHREAD] /* Load curthread */ ldr w1, [x1, #(TD_AST)] /* Check if we have a non-zero AST mask */ cbz w1, 2f /* Restore interrupts */ msr daif, x19 /* handle the ast */ mov x0, sp bl _C_LABEL(ast) /* Re-check for new ast scheduled */ b 1b 2: .endm #ifdef KMSAN /* * The KMSAN runtime relies on a TLS block to track initialization and origin * state for function parameters and return values. To keep this state * consistent in the face of asynchronous kernel-mode traps, the runtime * maintains a stack of blocks: when handling an exception or interrupt, * kmsan_intr_enter() pushes the new block to be used until the handler is * complete, at which point kmsan_intr_leave() restores the previous block. * * Thus, KMSAN_ENTER/LEAVE hooks are required only in handlers for events that * may have happened while in kernel-mode. In particular, they are not required * around amd64_syscall() or ast() calls. Otherwise, kmsan_intr_enter() can be * called unconditionally, without distinguishing between entry from user-mode * or kernel-mode. 
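The do_ast macro above keeps looping until no AST is pending, and it re-checks the flag with interrupts masked so an AST posted between the check and the exception return cannot be missed. A rough C rendering, with placeholder helpers standing in for the raw DAIF manipulation:

#include <stdbool.h>

extern void disable_interrupts(void);   /* msr daifset, #(DAIF_INTR) */
extern void restore_interrupts(void);   /* msr daif, x19 */
extern bool ast_pending(void);          /* curthread->td_ast != 0 */
extern void ast(void *tf);

static void
handle_pending_asts(void *tf)
{
        for (;;) {
                disable_interrupts();
                if (!ast_pending())
                        break;          /* leave masked; ERET restores state */
                restore_interrupts();
                ast(tf);                /* may post new ASTs, so loop */
        }
}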
*/ #define KMSAN_ENTER bl kmsan_intr_enter #define KMSAN_LEAVE bl kmsan_intr_leave #else #define KMSAN_ENTER #define KMSAN_LEAVE #endif ENTRY(handle_el1h_sync) save_registers 1 KMSAN_ENTER ldr x0, [x18, #PC_CURTHREAD] mov x1, sp bl do_el1h_sync KMSAN_LEAVE restore_registers 1 ERET END(handle_el1h_sync) ENTRY(handle_el1h_irq) save_registers 1 KMSAN_ENTER mov x0, sp bl intr_irq_handler KMSAN_LEAVE restore_registers 1 ERET END(handle_el1h_irq) ENTRY(handle_el1h_serror) save_registers 1 KMSAN_ENTER mov x0, sp 1: bl do_serror b 1b KMSAN_LEAVE END(handle_el1h_serror) ENTRY(handle_el0_sync) save_registers 0 KMSAN_ENTER ldr x0, [x18, #PC_CURTHREAD] mov x1, sp str x1, [x0, #TD_FRAME] bl do_el0_sync do_ast KMSAN_LEAVE restore_registers 0 ERET END(handle_el0_sync) ENTRY(handle_el0_irq) save_registers 0 KMSAN_ENTER mov x0, sp bl intr_irq_handler do_ast KMSAN_LEAVE restore_registers 0 ERET END(handle_el0_irq) ENTRY(handle_el0_serror) save_registers 0 KMSAN_ENTER mov x0, sp 1: bl do_serror b 1b KMSAN_LEAVE END(handle_el0_serror) ENTRY(handle_empty_exception) save_registers 0 KMSAN_ENTER mov x0, sp 1: bl unhandled_exception b 1b KMSAN_LEAVE END(handle_empty_exception) .macro vector name, el .align 7 save_registers_head \el b handle_\name dsb sy isb /* Break instruction to ensure we aren't executing code here. */ brk 0x42 .endm .macro vempty el vector empty_exception \el .endm .align 11 .globl exception_vectors exception_vectors: vempty 1 /* Synchronous EL1t */ vempty 1 /* IRQ EL1t */ vempty 1 /* FIQ EL1t */ vempty 1 /* Error EL1t */ vector el1h_sync 1 /* Synchronous EL1h */ vector el1h_irq 1 /* IRQ EL1h */ vempty 1 /* FIQ EL1h */ vector el1h_serror 1 /* Error EL1h */ vector el0_sync 0 /* Synchronous 64-bit EL0 */ vector el0_irq 0 /* IRQ 64-bit EL0 */ vempty 0 /* FIQ 64-bit EL0 */ vector el0_serror 0 /* Error 64-bit EL0 */ vector el0_sync 0 /* Synchronous 32-bit EL0 */ vector el0_irq 0 /* IRQ 32-bit EL0 */ vempty 0 /* FIQ 32-bit EL0 */ vector el0_serror 0 /* Error 32-bit EL0 */ +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/arm64/arm64/hyp_stub.S b/sys/arm64/arm64/hyp_stub.S index 42f76da95062..ee486edf67a0 100644 --- a/sys/arm64/arm64/hyp_stub.S +++ b/sys/arm64/arm64/hyp_stub.S @@ -1,65 +1,69 @@ /* * Copyright (c) 2017 Alexandru Elisei * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ +#include + #include .macro vempty .align 7 1: b 1b .endm /* * Install a new exception vector table with the base address supplied by the * parameter in register x0. */ .macro vector_stub_el1h_sync .align 7 msr vbar_el2, x0 ERET .endm .align 11 .globl hyp_stub_vectors hyp_stub_vectors: vempty /* Synchronous EL2t */ vempty /* IRQ EL2t */ vempty /* FIQ EL2t */ vempty /* SError EL2t */ vempty /* Synchronous EL2h */ vempty /* IRQ EL2h */ vempty /* FIQ EL2h */ vempty /* SError EL2h */ vector_stub_el1h_sync /* Synchronous 64-bit EL1 */ vempty /* IRQ 64-bit EL1 */ vempty /* FIQ 64-bit EL1 */ vempty /* SError 64-bit EL1 */ vempty /* Synchronous 32-bit EL1 */ vempty /* IRQ 32-bit EL1 */ vempty /* FIQ 32-bit EL1 */ vempty /* SError 32-bit EL1 */ + +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S index fffebe8f2b02..d1120e0476a2 100644 --- a/sys/arm64/arm64/locore.S +++ b/sys/arm64/arm64/locore.S @@ -1,1008 +1,1011 @@ /*- * Copyright (c) 2012-2014 Andrew Turner * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "assym.inc" #include "opt_kstack_pages.h" +#include #include #include #include #include #include #include #include #include #include #define VIRT_BITS 48 #if PAGE_SIZE == PAGE_SIZE_16K /* * The number of level 3 tables to create. 32 will allow for 1G of address * space, the same as a single level 2 page with 4k pages. */ #define L3_PAGE_COUNT 32 #endif /* * The size of our bootstrap stack. 
*/ #define BOOT_STACK_SIZE (KSTACK_PAGES * PAGE_SIZE) .globl kernbase .set kernbase, KERNBASE /* * We assume: * MMU on with an identity map, or off * D-Cache: off * I-Cache: on or off * We are loaded at a 2MiB aligned address */ ENTRY(_start) /* Enter the kernel exception level */ bl enter_kernel_el /* * Disable the MMU. We may have entered the kernel with it on and * will need to update the tables later. If this has been set up * with anything other than a VA == PA map then this will fail, * but in this case the code to find where we are running from * would have also failed. */ dsb sy mrs x2, sctlr_el1 bic x2, x2, SCTLR_M msr sctlr_el1, x2 isb /* Set the context id */ msr contextidr_el1, xzr /* Get the virt -> phys offset */ bl get_load_phys_addr /* * At this point: * x28 = Our physical load address */ /* Create the page tables */ bl create_pagetables /* * At this point: * x27 = TTBR0 table * x26 = Kernel L1 table * x24 = TTBR1 table */ /* Enable the mmu */ bl start_mmu /* Load the new ttbr0 pagetable */ adrp x27, pagetable_l0_ttbr0 add x27, x27, :lo12:pagetable_l0_ttbr0 /* Jump to the virtual address space */ ldr x15, .Lvirtdone br x15 virtdone: BTI_J /* Set up the stack */ adrp x25, initstack_end add x25, x25, :lo12:initstack_end sub sp, x25, #PCB_SIZE /* Zero the BSS */ ldr x15, .Lbss ldr x14, .Lend 1: str xzr, [x15], #8 cmp x15, x14 b.lo 1b #if defined(PERTHREAD_SSP) /* Set sp_el0 to the boot canary for early per-thread SSP to work */ adrp x15, boot_canary add x15, x15, :lo12:boot_canary msr sp_el0, x15 #endif /* Backup the module pointer */ mov x1, x0 sub sp, sp, #BOOTPARAMS_SIZE mov x0, sp str x1, [x0, #BP_MODULEP] adrp x25, initstack add x25, x25, :lo12:initstack str x25, [x0, #BP_KERN_STACK] str x27, [x0, #BP_KERN_TTBR0] str x23, [x0, #BP_BOOT_EL] str x4, [x0, #BP_HCR_EL2] #ifdef KASAN /* Save bootparams */ mov x19, x0 /* Bootstrap an early shadow map for the boot stack. */ ldr x0, [x0, #BP_KERN_STACK] ldr x1, =BOOT_STACK_SIZE bl kasan_init_early /* Restore bootparams */ mov x0, x19 #endif /* trace back starts here */ mov fp, #0 /* Branch to C code */ bl initarm /* We are done with the boot params */ add sp, sp, #BOOTPARAMS_SIZE /* * Enable pointer authentication in the kernel. We set the keys for * thread0 in initarm so have to wait until it returns to enable it. * If we were to enable it in initarm then any authentication when * returning would fail as it was called with pointer authentication * disabled. */ bl ptrauth_start bl mi_startup /* We should not get here */ brk 0 .align 3 .Lvirtdone: .quad virtdone .Lbss: .quad __bss_start .Lend: .quad __bss_end END(_start) #ifdef SMP /* * void * mpentry_psci(unsigned long) * * Called by a core when it is being brought online with psci. * The data in x0 is passed straight to init_secondary. */ ENTRY(mpentry_psci) mov x26, xzr b mpentry_common END(mpentry_psci) /* * void * mpentry_spintable(void) * * Called by a core when it is being brought online with a spin-table. * Reads the new CPU ID and passes this to init_secondary. 
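With spin-table boot, each secondary core parks in the spintable_wait routine that follows until the BSP publishes that core's MPIDR affinity value in ap_cpuid; only then does it fall through into mpentry_common. Roughly, in C (illustrative):

#include <stdint.h>

extern volatile uint64_t ap_cpuid;      /* written by the BSP to release one core */

static void
spintable_wait_sketch(uint64_t my_affinity)     /* mpidr_el1 & CPU_AFF_MASK */
{
        while (ap_cpuid != my_affinity)
                ;                       /* spin until this core is released */
        ap_cpuid = 0;                   /* free the slot for the next core */
        /* dsb sy; sev -- publish the store and wake any waiter */
}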
*/ ENTRY(mpentry_spintable) ldr x26, =spintable_wait b mpentry_common END(mpentry_spintable) /* Wait for the current CPU to be released */ LENTRY(spintable_wait) /* Read the affinity bits from mpidr_el1 */ mrs x1, mpidr_el1 ldr x2, =CPU_AFF_MASK and x1, x1, x2 adrp x2, ap_cpuid 1: ldr x0, [x2, :lo12:ap_cpuid] cmp x0, x1 b.ne 1b str xzr, [x2, :lo12:ap_cpuid] dsb sy sev ret LEND(mpentry_spintable) LENTRY(mpentry_common) /* Disable interrupts */ msr daifset, #DAIF_INTR /* Enter the kernel exception level */ bl enter_kernel_el /* Set the context id */ msr contextidr_el1, xzr /* Load the kernel page table */ adrp x24, pagetable_l0_ttbr1 add x24, x24, :lo12:pagetable_l0_ttbr1 /* Load the identity page table */ adrp x27, pagetable_l0_ttbr0_bootstrap add x27, x27, :lo12:pagetable_l0_ttbr0_bootstrap /* Enable the mmu */ bl start_mmu /* Load the new ttbr0 pagetable */ adrp x27, pagetable_l0_ttbr0 add x27, x27, :lo12:pagetable_l0_ttbr0 /* Jump to the virtual address space */ ldr x15, =mp_virtdone br x15 mp_virtdone: BTI_J /* * Allow this CPU to wait until the kernel is ready for it, * e.g. with spin-table but each CPU uses the same release address */ cbz x26, 1f blr x26 1: /* Start using the AP boot stack */ adrp x4, bootstack ldr x4, [x4, :lo12:bootstack] mov sp, x4 #if defined(PERTHREAD_SSP) /* Set sp_el0 to the boot canary for early per-thread SSP to work */ adrp x15, boot_canary add x15, x15, :lo12:boot_canary msr sp_el0, x15 #endif /* Load the kernel ttbr0 pagetable */ msr ttbr0_el1, x27 isb /* Invalidate the TLB */ tlbi vmalle1 dsb sy isb /* * Initialize the per-CPU pointer before calling into C code, for the * benefit of kernel sanitizers. */ adrp x18, bootpcpu ldr x18, [x18, :lo12:bootpcpu] msr tpidr_el1, x18 b init_secondary LEND(mpentry_common) #endif /* * If we are started in EL2, configure the required hypervisor * registers and drop to EL1. */ LENTRY(enter_kernel_el) mrs x23, CurrentEL and x23, x23, #(CURRENTEL_EL_MASK) cmp x23, #(CURRENTEL_EL_EL2) b.eq 1f ret 1: /* * Disable the MMU. If the HCR_EL2.E2H field is set we will clear it * which may break address translation. */ dsb sy mrs x2, sctlr_el2 bic x2, x2, SCTLR_M msr sctlr_el2, x2 isb /* Configure the Hypervisor */ ldr x2, =(HCR_RW | HCR_APK | HCR_API) msr hcr_el2, x2 /* Stash value of HCR_EL2 for later */ isb mrs x4, hcr_el2 /* Load the Virtualization Process ID Register */ mrs x2, midr_el1 msr vpidr_el2, x2 /* Load the Virtualization Multiprocess ID Register */ mrs x2, mpidr_el1 msr vmpidr_el2, x2 /* Set the bits that need to be 1 in sctlr_el1 */ ldr x2, .Lsctlr_res1 msr sctlr_el1, x2 /* * On some hardware, e.g., Apple M1, we can't clear E2H, so make sure we * don't trap to EL2 for SIMD register usage to have at least a * minimally usable system. 
*/ tst x4, #HCR_E2H mov x3, #CPTR_RES1 /* HCR_E2H == 0 */ mov x5, #CPTR_FPEN /* HCR_E2H == 1 */ csel x2, x3, x5, eq msr cptr_el2, x2 /* Don't trap to EL2 for CP15 traps */ msr hstr_el2, xzr /* Enable access to the physical timers at EL1 */ mrs x2, cnthctl_el2 orr x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN) msr cnthctl_el2, x2 /* Set the counter offset to a known value */ msr cntvoff_el2, xzr /* Hypervisor trap functions */ adrp x2, hyp_stub_vectors add x2, x2, :lo12:hyp_stub_vectors msr vbar_el2, x2 /* Zero vttbr_el2 so a hypervisor can tell the host and guest apart */ msr vttbr_el2, xzr mov x2, #(PSR_DAIF | PSR_M_EL1h) msr spsr_el2, x2 /* Configure GICv3 CPU interface */ mrs x2, id_aa64pfr0_el1 /* Extract GIC bits from the register */ ubfx x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS /* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */ cmp x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT) b.ne 2f mrs x2, icc_sre_el2 orr x2, x2, #ICC_SRE_EL2_EN /* Enable access from insecure EL1 */ orr x2, x2, #ICC_SRE_EL2_SRE /* Enable system registers */ msr icc_sre_el2, x2 2: /* Set the address to return to our return address */ msr elr_el2, x30 isb eret .align 3 .Lsctlr_res1: .quad SCTLR_RES1 LEND(enter_kernel_el) /* * Get the physical address the kernel was loaded at. */ LENTRY(get_load_phys_addr) /* Load the offset of get_load_phys_addr from KERNBASE */ ldr x28, =(get_load_phys_addr - KERNBASE) /* Load the physical address of get_load_phys_addr */ adr x29, get_load_phys_addr /* Find the physical address of KERNBASE, i.e. our load address */ sub x28, x29, x28 ret LEND(get_load_phys_addr) /* * This builds the page tables containing the identity map, and the kernel * virtual map. * * It relys on: * We were loaded to an address that is on a 2MiB boundary * All the memory must not cross a 1GiB boundaty * x28 contains the physical address we were loaded from * * There are 7 or 8 pages before that address for the page tables * The pages used are: * - The Kernel L3 tables (only for 16k kernel) * - The Kernel L2 table * - The Kernel L1 table * - The Kernel L0 table (TTBR1) * - The identity (PA = VA) L2 table * - The identity (PA = VA) L1 table * - The identity (PA = VA) L0 table (Early TTBR0) * - The Kernel empty L0 table (Late TTBR0) */ LENTRY(create_pagetables) /* Save the Link register */ mov x5, x30 /* Clean the page table */ adrp x6, pagetable add x6, x6, :lo12:pagetable mov x26, x6 adrp x27, pagetable_end add x27, x27, :lo12:pagetable_end 1: stp xzr, xzr, [x6], #16 stp xzr, xzr, [x6], #16 stp xzr, xzr, [x6], #16 stp xzr, xzr, [x6], #16 cmp x6, x27 b.lo 1b /* * Build the TTBR1 maps. */ /* Find the size of the kernel */ mov x6, #(KERNBASE) #if defined(LINUX_BOOT_ABI) /* X19 is used as 'map FDT data' flag */ mov x19, xzr /* No modules or FDT pointer ? */ cbz x0, booti_no_fdt /* * Test if x0 points to modules descriptor(virtual address) or * to FDT (physical address) */ cmp x0, x6 /* x6 is #(KERNBASE) */ b.lo booti_fdt #endif /* Booted with modules pointer */ /* Find modulep - begin */ sub x8, x0, x6 /* * Add space for the module data. When PAGE_SIZE is 4k this will * add at least 2 level 2 blocks (2 * 2MiB). When PAGE_SIZE is * larger it will be at least as large as we use smaller level 3 * pages. 
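The instructions that follow round that size up before converting it into a count of level 2 blocks (4K pages) or level 3 pages (16K pages). A small arithmetic sketch of the same calculation (illustrative):

#include <stddef.h>

/* Sketch: bytes to map for the kernel plus preloaded module metadata,
 * with ~6 MiB of slack, converted to a rounded-down entry count. */
static size_t
initial_map_entries(size_t bytes_to_map, unsigned int pte_shift)
{
        return ((bytes_to_map + (6 * 1024 * 1024 - 1)) >> pte_shift);
}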
*/ ldr x7, =((6 * 1024 * 1024) - 1) add x8, x8, x7 b common #if defined(LINUX_BOOT_ABI) booti_fdt: /* Booted by U-Boot booti with FDT data */ /* Set 'map FDT data' flag */ mov x19, #1 booti_no_fdt: /* Booted by U-Boot booti without FTD data */ /* Find the end - begin */ ldr x7, .Lend sub x8, x7, x6 /* * Add one 2MiB page for copy of FDT data (maximum FDT size), * one for metadata and round up */ ldr x7, =(3 * L2_SIZE - 1) add x8, x8, x7 #endif common: #if PAGE_SIZE != PAGE_SIZE_4K /* * Create L3 and L3C pages. The kernel will be loaded at a 2M aligned * address, enabling the creation of L3C pages. However, when the page * size is larger than 4k, L2 blocks are too large to map the kernel * with 2M alignment. */ #define PTE_SHIFT L3_SHIFT #define BUILD_PTE_FUNC build_l3_page_pagetable #else #define PTE_SHIFT L2_SHIFT #define BUILD_PTE_FUNC build_l2_block_pagetable #endif /* Get the number of blocks/pages to allocate, rounded down */ lsr x10, x8, #(PTE_SHIFT) /* Create the kernel space PTE table */ mov x6, x26 mov x7, #(ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK)) mov x8, #(KERNBASE) mov x9, x28 bl BUILD_PTE_FUNC #undef PTE_SHIFT #undef BUILD_PTE_FUNC #if PAGE_SIZE != PAGE_SIZE_4K /* Move to the l2 table */ ldr x9, =(PAGE_SIZE * L3_PAGE_COUNT) add x26, x26, x9 /* Link the l2 -> l3 table */ mov x9, x6 mov x6, x26 bl link_l2_pagetable #endif /* Move to the l1 table */ add x26, x26, #PAGE_SIZE /* Link the l1 -> l2 table */ mov x9, x6 mov x6, x26 bl link_l1_pagetable /* Move to the l0 table */ add x24, x26, #PAGE_SIZE /* Link the l0 -> l1 table */ mov x9, x6 mov x6, x24 mov x10, #1 bl link_l0_pagetable /* * Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_S1_nG. * They are only needed early on, so the VA = PA map is uncached. */ add x27, x24, #PAGE_SIZE mov x6, x27 /* The initial page table */ /* Create the VA = PA map */ mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK)) adrp x16, _start and x16, x16, #(~L2_OFFSET) mov x9, x16 /* PA start */ mov x8, x16 /* VA start (== PA start) */ mov x10, #1 bl build_l2_block_pagetable #if defined(SOCDEV_PA) /* Create a table for the UART */ mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_DEVICE)) ldr x9, =(L2_SIZE) add x16, x16, x9 /* VA start */ mov x8, x16 /* Store the socdev virtual address */ add x17, x8, #(SOCDEV_PA & L2_OFFSET) adrp x9, socdev_va str x17, [x9, :lo12:socdev_va] mov x9, #(SOCDEV_PA & ~L2_OFFSET) /* PA start */ mov x10, #1 bl build_l2_block_pagetable #endif #if defined(LINUX_BOOT_ABI) /* Map FDT data ? */ cbz x19, 1f /* Create the mapping for FDT data (2 MiB max) */ mov x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK)) ldr x9, =(L2_SIZE) add x16, x16, x9 /* VA start */ mov x8, x16 mov x9, x0 /* PA start */ /* Update the module pointer to point at the allocated memory */ and x0, x0, #(L2_OFFSET) /* Keep the lower bits */ add x0, x0, x8 /* Add the aligned virtual address */ mov x10, #1 bl build_l2_block_pagetable 1: #endif /* Move to the l1 table */ add x27, x27, #PAGE_SIZE /* Link the l1 -> l2 table */ mov x9, x6 mov x6, x27 bl link_l1_pagetable /* Move to the l0 table */ add x27, x27, #PAGE_SIZE /* Link the l0 -> l1 table */ mov x9, x6 mov x6, x27 mov x10, #1 bl link_l0_pagetable /* Restore the Link register */ mov x30, x5 ret LEND(create_pagetables) /* * Builds an L0 -> L1 table descriptor * * x6 = L0 table * x8 = Virtual Address * x9 = L1 PA (trashed) * x10 = Entry count (trashed) * x11, x12 and x13 are trashed */ LENTRY(link_l0_pagetable) /* * Link an L0 -> L1 table entry. 
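The descriptor construction in link_l0_pagetable, which follows, is easier to see in C. An illustrative sketch only; the shift, mask, and attribute values come from machine/pte.h in the real code, and the page size may be 4K or 16K:

#include <stdint.h>

/* Sketch: install 'count' L0 table descriptors pointing at consecutive
 * physical pages of L1 table, starting at l1_pa (page aligned). */
static void
link_l0_sketch(uint64_t *l0, uint64_t va, uint64_t l1_pa, uint64_t count,
    unsigned int l0_shift, uint64_t l0_addr_mask, uint64_t table_attr,
    uint64_t page_size)
{
        uint64_t idx = (va >> l0_shift) & l0_addr_mask;

        while (count-- > 0) {
                l0[idx++] = table_attr | l1_pa;
                l1_pa += page_size;     /* next L1 table page */
        }
}

The L1 -> L2 and L2 -> L3 link routines later in the file are the single-entry version of the same store.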
*/ /* Find the table index */ lsr x11, x8, #L0_SHIFT and x11, x11, #L0_ADDR_MASK /* Build the L0 block entry */ mov x12, #L0_TABLE orr x12, x12, #(TATTR_UXN_TABLE | TATTR_AP_TABLE_NO_EL0) /* Only use the output address bits */ lsr x9, x9, #PAGE_SHIFT 1: orr x13, x12, x9, lsl #PAGE_SHIFT /* Store the entry */ str x13, [x6, x11, lsl #3] sub x10, x10, #1 add x11, x11, #1 add x9, x9, #1 cbnz x10, 1b ret LEND(link_l0_pagetable) /* * Builds an L1 -> L2 table descriptor * * x6 = L1 table * x8 = Virtual Address * x9 = L2 PA (trashed) * x11, x12 and x13 are trashed */ LENTRY(link_l1_pagetable) /* * Link an L1 -> L2 table entry. */ /* Find the table index */ lsr x11, x8, #L1_SHIFT and x11, x11, #Ln_ADDR_MASK /* Build the L1 block entry */ mov x12, #L1_TABLE /* Only use the output address bits */ lsr x9, x9, #PAGE_SHIFT orr x13, x12, x9, lsl #PAGE_SHIFT /* Store the entry */ str x13, [x6, x11, lsl #3] ret LEND(link_l1_pagetable) /* * Builds count 2 MiB page table entry * x6 = L2 table * x7 = Block attributes * x8 = VA start * x9 = PA start (trashed) * x10 = Entry count (trashed) * x11, x12 and x13 are trashed */ LENTRY(build_l2_block_pagetable) /* * Build the L2 table entry. */ /* Find the table index */ lsr x11, x8, #L2_SHIFT and x11, x11, #Ln_ADDR_MASK /* Build the L2 block entry */ orr x12, x7, #L2_BLOCK orr x12, x12, #(ATTR_DEFAULT) orr x12, x12, #(ATTR_S1_UXN) #ifdef __ARM_FEATURE_BTI_DEFAULT orr x12, x12, #(ATTR_S1_GP) #endif /* Only use the output address bits */ lsr x9, x9, #L2_SHIFT /* Set the physical address for this virtual address */ 1: orr x13, x12, x9, lsl #L2_SHIFT /* Store the entry */ str x13, [x6, x11, lsl #3] sub x10, x10, #1 add x11, x11, #1 add x9, x9, #1 cbnz x10, 1b ret LEND(build_l2_block_pagetable) #if PAGE_SIZE != PAGE_SIZE_4K /* * Builds an L2 -> L3 table descriptor * * x6 = L2 table * x8 = Virtual Address * x9 = L3 PA (trashed) * x11, x12 and x13 are trashed */ LENTRY(link_l2_pagetable) /* * Link an L2 -> L3 table entry. */ /* Find the table index */ lsr x11, x8, #L2_SHIFT and x11, x11, #Ln_ADDR_MASK /* Build the L1 block entry */ mov x12, #L2_TABLE /* Only use the output address bits */ lsr x9, x9, #PAGE_SHIFT orr x13, x12, x9, lsl #PAGE_SHIFT /* Store the entry */ str x13, [x6, x11, lsl #3] ret LEND(link_l2_pagetable) /* * Builds count level 3 page table entries. Uses ATTR_CONTIGUOUS to create * large page (L3C) mappings when the current VA and remaining count allow * it. * x6 = L3 table * x7 = Block attributes * x8 = VA start * x9 = PA start (trashed) * x10 = Entry count (trashed) * x11, x12 and x13 are trashed * * VA start (x8) modulo L3C_SIZE must equal PA start (x9) modulo L3C_SIZE. */ LENTRY(build_l3_page_pagetable) /* * Build the L3 table entry. 
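The only subtle part of build_l3_page_pagetable, which follows, is deciding when the contiguous hint may be set so that runs of L3 entries can be treated as one L3C mapping. As a sketch:

#include <stdint.h>

/* Sketch: adjust ATTR_CONTIGUOUS for the entry at 'idx' when 'remaining'
 * entries are still to be written; a run is L3C_ENTRIES entries long. */
static uint64_t
l3c_adjust(uint64_t attr, uint64_t idx, uint64_t remaining,
    uint64_t l3c_entries, uint64_t attr_contiguous)
{
        if ((idx & (l3c_entries - 1)) != 0)
                return (attr);          /* mid-run: keep the run's setting */
        if (remaining >= l3c_entries)
                return (attr | attr_contiguous);
        return (attr & ~attr_contiguous);
}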
*/ /* Find the table index */ lsr x11, x8, #L3_SHIFT and x11, x11, #Ln_ADDR_MASK /* Build the L3 page entry */ orr x12, x7, #L3_PAGE orr x12, x12, #(ATTR_DEFAULT) orr x12, x12, #(ATTR_S1_UXN) #ifdef __ARM_FEATURE_BTI_DEFAULT orr x12, x12, #(ATTR_S1_GP) #endif /* Only use the output address bits */ lsr x9, x9, #L3_SHIFT /* Check if an ATTR_CONTIGUOUS mapping is possible */ 1: tst x11, #(L3C_ENTRIES - 1) b.ne 2f cmp x10, #L3C_ENTRIES b.lo 3f orr x12, x12, #(ATTR_CONTIGUOUS) b 2f 3: and x12, x12, #(~ATTR_CONTIGUOUS) /* Set the physical address for this virtual address */ 2: orr x13, x12, x9, lsl #L3_SHIFT /* Store the entry */ str x13, [x6, x11, lsl #3] sub x10, x10, #1 add x11, x11, #1 add x9, x9, #1 cbnz x10, 1b ret LEND(build_l3_page_pagetable) #endif LENTRY(start_mmu) dsb sy /* Load the exception vectors */ ldr x2, =exception_vectors msr vbar_el1, x2 /* Load ttbr0 and ttbr1 */ msr ttbr0_el1, x27 msr ttbr1_el1, x24 isb /* Clear the Monitor Debug System control register */ msr mdscr_el1, xzr /* Invalidate the TLB */ tlbi vmalle1is dsb ish isb ldr x2, mair msr mair_el1, x2 /* * Setup TCR according to the PARange and ASIDBits fields * from ID_AA64MMFR0_EL1 and the HAFDBS field from the * ID_AA64MMFR1_EL1. More precisely, set TCR_EL1.AS * to 1 only if the ASIDBits field equals 0b0010. */ ldr x2, tcr mrs x3, id_aa64mmfr0_el1 /* Copy the bottom 3 bits from id_aa64mmfr0_el1 into TCR.IPS */ bfi x2, x3, #(TCR_IPS_SHIFT), #(TCR_IPS_WIDTH) and x3, x3, #(ID_AA64MMFR0_ASIDBits_MASK) /* Check if the HW supports 16 bit ASIDS */ cmp x3, #(ID_AA64MMFR0_ASIDBits_16) /* If so x3 == 1, else x3 == 0 */ cset x3, eq /* Set TCR.AS with x3 */ bfi x2, x3, #(TCR_ASID_SHIFT), #(TCR_ASID_WIDTH) /* * Check if the HW supports access flag and dirty state updates, * and set TCR_EL1.HA and TCR_EL1.HD accordingly. */ mrs x3, id_aa64mmfr1_el1 and x3, x3, #(ID_AA64MMFR1_HAFDBS_MASK) cmp x3, #1 b.ne 1f orr x2, x2, #(TCR_HA) b 2f 1: cmp x3, #2 b.ne 2f orr x2, x2, #(TCR_HA | TCR_HD) 2: msr tcr_el1, x2 /* * Setup SCTLR. 
*/ ldr x2, sctlr_set ldr x3, sctlr_clear mrs x1, sctlr_el1 bic x1, x1, x3 /* Clear the required bits */ orr x1, x1, x2 /* Set the required bits */ msr sctlr_el1, x1 isb ret .align 3 mair: .quad MAIR_ATTR(MAIR_DEVICE_nGnRnE, VM_MEMATTR_DEVICE_nGnRnE) | \ MAIR_ATTR(MAIR_NORMAL_NC, VM_MEMATTR_UNCACHEABLE) | \ MAIR_ATTR(MAIR_NORMAL_WB, VM_MEMATTR_WRITE_BACK) | \ MAIR_ATTR(MAIR_NORMAL_WT, VM_MEMATTR_WRITE_THROUGH) | \ MAIR_ATTR(MAIR_DEVICE_nGnRE, VM_MEMATTR_DEVICE_nGnRE) tcr: #if PAGE_SIZE == PAGE_SIZE_4K #define TCR_TG (TCR_TG1_4K | TCR_TG0_4K) #elif PAGE_SIZE == PAGE_SIZE_16K #define TCR_TG (TCR_TG1_16K | TCR_TG0_16K) #else #error Unsupported page size #endif .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_TG | \ TCR_CACHE_ATTRS | TCR_SMP_ATTRS) sctlr_set: /* Bits to set */ .quad (SCTLR_LSMAOE | SCTLR_nTLSMD | SCTLR_UCI | SCTLR_SPAN | \ SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \ SCTLR_I | SCTLR_SED | SCTLR_SA0 | SCTLR_SA | SCTLR_C | \ SCTLR_M | SCTLR_CP15BEN | SCTLR_BT1 | SCTLR_BT0) sctlr_clear: /* Bits to clear */ .quad (SCTLR_EE | SCTLR_E0E | SCTLR_IESB | SCTLR_WXN | SCTLR_UMA | \ SCTLR_ITD | SCTLR_A) LEND(start_mmu) ENTRY(abort) b abort END(abort) .bss .align PAGE_SHIFT initstack: .space BOOT_STACK_SIZE initstack_end: .section .init_pagetable, "aw", %nobits .align PAGE_SHIFT /* * 6 initial tables (in the following order): * L2 for kernel (High addresses) * L1 for kernel * L0 for kernel * L1 bootstrap for user (Low addresses) * L0 bootstrap for user * L0 for user */ .globl pagetable_l0_ttbr1 pagetable: #if PAGE_SIZE != PAGE_SIZE_4K .space (PAGE_SIZE * L3_PAGE_COUNT) pagetable_l2_ttbr1: #endif .space PAGE_SIZE pagetable_l1_ttbr1: .space PAGE_SIZE pagetable_l0_ttbr1: .space PAGE_SIZE pagetable_l2_ttbr0_bootstrap: .space PAGE_SIZE pagetable_l1_ttbr0_bootstrap: .space PAGE_SIZE pagetable_l0_ttbr0_bootstrap: .space PAGE_SIZE pagetable_l0_ttbr0: .space PAGE_SIZE pagetable_end: el2_pagetable: .space PAGE_SIZE .section .rodata, "a", %progbits .globl aarch32_sigcode .align 2 aarch32_sigcode: .word 0xe1a0000d // mov r0, sp .word 0xe2800040 // add r0, r0, #SIGF_UC .word 0xe59f700c // ldr r7, [pc, #12] .word 0xef000000 // swi #0 .word 0xe59f7008 // ldr r7, [pc, #8] .word 0xef000000 // swi #0 .word 0xeafffffa // b . - 16 .word SYS_sigreturn .word SYS_exit .align 3 .size aarch32_sigcode, . - aarch32_sigcode aarch32_esigcode: .data .global sz_aarch32_sigcode sz_aarch32_sigcode: .quad aarch32_esigcode - aarch32_sigcode + +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/arm64/arm64/memcmp.S b/sys/arm64/arm64/memcmp.S index 8517a181f3f3..19f577f218e0 100644 --- a/sys/arm64/arm64/memcmp.S +++ b/sys/arm64/arm64/memcmp.S @@ -1,136 +1,139 @@ /* memcmp - compare memory * * Copyright (c) 2013-2020, Arm Limited. * SPDX-License-Identifier: MIT */ /* Assumptions: * * ARMv8-a, AArch64, unaligned accesses. */ +#include + #include #define L(l) .L ## l /* Parameters and result. */ #define src1 x0 #define src2 x1 #define limit x2 #define result w0 /* Internal variables. 
*/ #define data1 x3 #define data1w w3 #define data1h x4 #define data2 x5 #define data2w w5 #define data2h x6 #define tmp1 x7 #define tmp2 x8 ENTRY (memcmp) subs limit, limit, 8 b.lo L(less8) ldr data1, [src1], 8 ldr data2, [src2], 8 cmp data1, data2 b.ne L(return) subs limit, limit, 8 b.gt L(more16) ldr data1, [src1, limit] ldr data2, [src2, limit] b L(return) L(more16): ldr data1, [src1], 8 ldr data2, [src2], 8 cmp data1, data2 bne L(return) /* Jump directly to comparing the last 16 bytes for 32 byte (or less) strings. */ subs limit, limit, 16 b.ls L(last_bytes) /* We overlap loads between 0-32 bytes at either side of SRC1 when we try to align, so limit it only to strings larger than 128 bytes. */ cmp limit, 96 b.ls L(loop16) /* Align src1 and adjust src2 with bytes not yet done. */ and tmp1, src1, 15 add limit, limit, tmp1 sub src1, src1, tmp1 sub src2, src2, tmp1 /* Loop performing 16 bytes per iteration using aligned src1. Limit is pre-decremented by 16 and must be larger than zero. Exit if <= 16 bytes left to do or if the data is not equal. */ .p2align 4 L(loop16): ldp data1, data1h, [src1], 16 ldp data2, data2h, [src2], 16 subs limit, limit, 16 ccmp data1, data2, 0, hi ccmp data1h, data2h, 0, eq b.eq L(loop16) cmp data1, data2 bne L(return) mov data1, data1h mov data2, data2h cmp data1, data2 bne L(return) /* Compare last 1-16 bytes using unaligned access. */ L(last_bytes): add src1, src1, limit add src2, src2, limit ldp data1, data1h, [src1] ldp data2, data2h, [src2] cmp data1, data2 bne L(return) mov data1, data1h mov data2, data2h cmp data1, data2 /* Compare data bytes and set return value to 0, -1 or 1. */ L(return): #ifndef __AARCH64EB__ rev data1, data1 rev data2, data2 #endif cmp data1, data2 L(ret_eq): cset result, ne cneg result, result, lo ret .p2align 4 /* Compare up to 8 bytes. Limit is [-8..-1]. */ L(less8): adds limit, limit, 4 b.lo L(less4) ldr data1w, [src1], 4 ldr data2w, [src2], 4 cmp data1w, data2w b.ne L(return) sub limit, limit, 4 L(less4): adds limit, limit, 4 beq L(ret_eq) L(byte_loop): ldrb data1w, [src1], 1 ldrb data2w, [src2], 1 subs limit, limit, 1 ccmp data1w, data2w, 0, ne /* NZCV = 0b0000. */ b.eq L(byte_loop) sub result, data1w, data2w ret END (memcmp) +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/arm64/arm64/memcpy.S b/sys/arm64/arm64/memcpy.S index b394d6c1d30a..01daa8e1c228 100644 --- a/sys/arm64/arm64/memcpy.S +++ b/sys/arm64/arm64/memcpy.S @@ -1,242 +1,245 @@ /* * memcpy - copy memory area * * Copyright (c) 2012-2020, Arm Limited. * SPDX-License-Identifier: MIT */ /* Assumptions: * * ARMv8-a, AArch64, unaligned accesses. * */ +#include + #include #define L(l) .L ## l #define dstin x0 #define src x1 #define count x2 #define dst x3 #define srcend x4 #define dstend x5 #define A_l x6 #define A_lw w6 #define A_h x7 #define B_l x8 #define B_lw w8 #define B_h x9 #define C_l x10 #define C_lw w10 #define C_h x11 #define D_l x12 #define D_h x13 #define E_l x14 #define E_h x15 #define F_l x16 #define F_h x17 #define G_l count #define G_h dst #define H_l src #define H_h srcend #define tmp1 x14 /* This implementation handles overlaps and supports both memcpy and memmove from a single entry point. It uses unaligned accesses and branchless sequences to keep the code small, simple and improve performance. Copies are split into 3 main cases: small copies of up to 32 bytes, medium copies of up to 128 bytes, and large copies. The overhead of the overlap check is negligible since it is only required for large copies. 
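Since memmove and memcpy share this entry point, the only cost the overlap handling adds to plain memcpy calls is one unsigned compare at the top of L(copy_long) below. As a sketch of that dispatch:

#include <stddef.h>
#include <stdint.h>

/* Sketch: a single unsigned subtraction detects destructive overlap,
 * i.e. src < dst < src + count, where a forward copy would clobber
 * bytes it has not read yet. */
static int
needs_backwards_copy(const void *dst, const void *src, size_t count)
{
        uintptr_t diff = (uintptr_t)dst - (uintptr_t)src;

        /* diff == 0 (dst == src) is treated as a no-op and handled first. */
        return (diff != 0 && diff < count);
}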
Large copies use a software pipelined loop processing 64 bytes per iteration. The destination pointer is 16-byte aligned to minimize unaligned accesses. The loop tail is handled by always copying 64 bytes from the end. */ EENTRY(memmove) ENTRY(memcpy) add srcend, src, count add dstend, dstin, count cmp count, 128 b.hi L(copy_long) cmp count, 32 b.hi L(copy32_128) /* Small copies: 0..32 bytes. */ cmp count, 16 b.lo L(copy16) ldp A_l, A_h, [src] ldp D_l, D_h, [srcend, -16] stp A_l, A_h, [dstin] stp D_l, D_h, [dstend, -16] ret /* Copy 8-15 bytes. */ L(copy16): tbz count, 3, L(copy8) ldr A_l, [src] ldr A_h, [srcend, -8] str A_l, [dstin] str A_h, [dstend, -8] ret .p2align 3 /* Copy 4-7 bytes. */ L(copy8): tbz count, 2, L(copy4) ldr A_lw, [src] ldr B_lw, [srcend, -4] str A_lw, [dstin] str B_lw, [dstend, -4] ret /* Copy 0..3 bytes using a branchless sequence. */ L(copy4): cbz count, L(copy0) lsr tmp1, count, 1 ldrb A_lw, [src] ldrb C_lw, [srcend, -1] ldrb B_lw, [src, tmp1] strb A_lw, [dstin] strb B_lw, [dstin, tmp1] strb C_lw, [dstend, -1] L(copy0): ret .p2align 4 /* Medium copies: 33..128 bytes. */ L(copy32_128): ldp A_l, A_h, [src] ldp B_l, B_h, [src, 16] ldp C_l, C_h, [srcend, -32] ldp D_l, D_h, [srcend, -16] cmp count, 64 b.hi L(copy128) stp A_l, A_h, [dstin] stp B_l, B_h, [dstin, 16] stp C_l, C_h, [dstend, -32] stp D_l, D_h, [dstend, -16] ret .p2align 4 /* Copy 65..128 bytes. */ L(copy128): ldp E_l, E_h, [src, 32] ldp F_l, F_h, [src, 48] cmp count, 96 b.ls L(copy96) ldp G_l, G_h, [srcend, -64] ldp H_l, H_h, [srcend, -48] stp G_l, G_h, [dstend, -64] stp H_l, H_h, [dstend, -48] L(copy96): stp A_l, A_h, [dstin] stp B_l, B_h, [dstin, 16] stp E_l, E_h, [dstin, 32] stp F_l, F_h, [dstin, 48] stp C_l, C_h, [dstend, -32] stp D_l, D_h, [dstend, -16] ret .p2align 4 /* Copy more than 128 bytes. */ L(copy_long): /* Use backwards copy if there is an overlap. */ sub tmp1, dstin, src cbz tmp1, L(copy0) cmp tmp1, count b.lo L(copy_long_backwards) /* Copy 16 bytes and then align dst to 16-byte alignment. */ ldp D_l, D_h, [src] and tmp1, dstin, 15 bic dst, dstin, 15 sub src, src, tmp1 add count, count, tmp1 /* Count is now 16 too large. */ ldp A_l, A_h, [src, 16] stp D_l, D_h, [dstin] ldp B_l, B_h, [src, 32] ldp C_l, C_h, [src, 48] ldp D_l, D_h, [src, 64]! subs count, count, 128 + 16 /* Test and readjust count. */ b.ls L(copy64_from_end) L(loop64): stp A_l, A_h, [dst, 16] ldp A_l, A_h, [src, 16] stp B_l, B_h, [dst, 32] ldp B_l, B_h, [src, 32] stp C_l, C_h, [dst, 48] ldp C_l, C_h, [src, 48] stp D_l, D_h, [dst, 64]! ldp D_l, D_h, [src, 64]! subs count, count, 64 b.hi L(loop64) /* Write the last iteration and copy 64 bytes from the end. */ L(copy64_from_end): ldp E_l, E_h, [srcend, -64] stp A_l, A_h, [dst, 16] ldp A_l, A_h, [srcend, -48] stp B_l, B_h, [dst, 32] ldp B_l, B_h, [srcend, -32] stp C_l, C_h, [dst, 48] ldp C_l, C_h, [srcend, -16] stp D_l, D_h, [dst, 64] stp E_l, E_h, [dstend, -64] stp A_l, A_h, [dstend, -48] stp B_l, B_h, [dstend, -32] stp C_l, C_h, [dstend, -16] ret .p2align 4 /* Large backwards copy for overlapping copies. Copy 16 bytes and then align dst to 16-byte alignment. */ L(copy_long_backwards): ldp D_l, D_h, [srcend, -16] and tmp1, dstend, 15 sub srcend, srcend, tmp1 sub count, count, tmp1 ldp A_l, A_h, [srcend, -16] stp D_l, D_h, [dstend, -16] ldp B_l, B_h, [srcend, -32] ldp C_l, C_h, [srcend, -48] ldp D_l, D_h, [srcend, -64]! 
sub dstend, dstend, tmp1 subs count, count, 128 b.ls L(copy64_from_start) L(loop64_backwards): stp A_l, A_h, [dstend, -16] ldp A_l, A_h, [srcend, -16] stp B_l, B_h, [dstend, -32] ldp B_l, B_h, [srcend, -32] stp C_l, C_h, [dstend, -48] ldp C_l, C_h, [srcend, -48] stp D_l, D_h, [dstend, -64]! ldp D_l, D_h, [srcend, -64]! subs count, count, 64 b.hi L(loop64_backwards) /* Write the last iteration and copy 64 bytes from the start. */ L(copy64_from_start): ldp G_l, G_h, [src, 48] stp A_l, A_h, [dstend, -16] ldp A_l, A_h, [src, 32] stp B_l, B_h, [dstend, -32] ldp B_l, B_h, [src, 16] stp C_l, C_h, [dstend, -48] ldp C_l, C_h, [src] stp D_l, D_h, [dstend, -64] stp G_l, G_h, [dstin, 48] stp A_l, A_h, [dstin, 32] stp B_l, B_h, [dstin, 16] stp C_l, C_h, [dstin] ret END(memcpy) EEND(memmove) +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/arm64/arm64/memset.S b/sys/arm64/arm64/memset.S index ec434493ce13..f52bfd62cc54 100644 --- a/sys/arm64/arm64/memset.S +++ b/sys/arm64/arm64/memset.S @@ -1,197 +1,201 @@ /* Copyright (c) 2012, Linaro Limited All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Linaro nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Assumptions: * * ARMv8-a, AArch64 * Unaligned accesses * */ +#include + #include #define dstin x0 #define val w1 #define count x2 #define tmp1 x3 #define tmp1w w3 #define tmp2 x4 #define tmp2w w4 #define zva_len_x x5 #define zva_len w5 #define zva_bits_x x6 #define A_l x7 #define A_lw w7 #define dst x8 #define tmp3w w9 ENTRY(memset) mov dst, dstin /* Preserve return value. */ ands A_lw, val, #255 #ifndef DONT_USE_DC b.eq .Lzero_mem #endif orr A_lw, A_lw, A_lw, lsl #8 orr A_lw, A_lw, A_lw, lsl #16 orr A_l, A_l, A_l, lsl #32 .Ltail_maybe_long: cmp count, #64 b.ge .Lnot_short .Ltail_maybe_tiny: cmp count, #15 b.le .Ltail15tiny .Ltail63: ands tmp1, count, #0x30 b.eq .Ltail15 add dst, dst, tmp1 cmp tmp1w, #0x20 b.eq 1f b.lt 2f stp A_l, A_l, [dst, #-48] 1: stp A_l, A_l, [dst, #-32] 2: stp A_l, A_l, [dst, #-16] .Ltail15: and count, count, #15 add dst, dst, count stp A_l, A_l, [dst, #-16] /* Repeat some/all of last store. */ ret .Ltail15tiny: /* Set up to 15 bytes. Does not assume earlier memory being set. 
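(Editor's note, not part of the patch.) The .Ltail15 path above uses the same end-anchored idea for memset: once at least 16 bytes have been set, the final 1-15 bytes are covered by one unaligned 16-byte store ending exactly at dst + len, which may overlap bytes already written. A small C sketch, illustrative only:

    #include <stdint.h>
    #include <string.h>

    /* Assumes len >= 16 and that 'pattern' is the fill byte replicated
       across a 64-bit word, as built at the top of memset. */
    static void
    set_tail(unsigned char *dst, size_t len, uint64_t pattern)
    {
        uint64_t pair[2] = { pattern, pattern };

        memcpy(dst + len - 16, pair, 16);   /* overlapping final store */
    }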
*/ tbz count, #3, 1f str A_l, [dst], #8 1: tbz count, #2, 1f str A_lw, [dst], #4 1: tbz count, #1, 1f strh A_lw, [dst], #2 1: tbz count, #0, 1f strb A_lw, [dst] 1: ret /* Critical loop. Start at a new cache line boundary. Assuming * 64 bytes per line, this ensures the entire loop is in one line. */ .p2align 6 .Lnot_short: neg tmp2, dst ands tmp2, tmp2, #15 b.eq 2f /* Bring DST to 128-bit (16-byte) alignment. We know that there's * more than that to set, so we simply store 16 bytes and advance by * the amount required to reach alignment. */ sub count, count, tmp2 stp A_l, A_l, [dst] add dst, dst, tmp2 /* There may be less than 63 bytes to go now. */ cmp count, #63 b.le .Ltail63 2: sub dst, dst, #16 /* Pre-bias. */ sub count, count, #64 1: stp A_l, A_l, [dst, #16] stp A_l, A_l, [dst, #32] stp A_l, A_l, [dst, #48] stp A_l, A_l, [dst, #64]! subs count, count, #64 b.ge 1b tst count, #0x3f add dst, dst, #16 b.ne .Ltail63 ret /* For zeroing memory, check to see if we can use the ZVA feature to * zero entire 'cache' lines. */ .Lzero_mem: mov A_l, #0 cmp count, #63 b.le .Ltail_maybe_tiny neg tmp2, dst ands tmp2, tmp2, #15 b.eq 1f sub count, count, tmp2 stp A_l, A_l, [dst] add dst, dst, tmp2 cmp count, #63 b.le .Ltail63 1: /* For zeroing small amounts of memory, it's not worth setting up * the line-clear code. */ cmp count, #128 b.lt .Lnot_short adrp tmp2, dczva_line_size add tmp2, tmp2, :lo12:dczva_line_size ldr zva_len, [tmp2] cbz zva_len, .Lnot_short .Lzero_by_line: /* Compute how far we need to go to become suitably aligned. We're * already at quad-word alignment. */ cmp count, zva_len_x b.lt .Lnot_short /* Not enough to reach alignment. */ sub zva_bits_x, zva_len_x, #1 neg tmp2, dst ands tmp2, tmp2, zva_bits_x b.eq 1f /* Already aligned. */ /* Not aligned, check that there's enough to copy after alignment. */ sub tmp1, count, tmp2 cmp tmp1, #64 ccmp tmp1, zva_len_x, #8, ge /* NZCV=0b1000 */ b.lt .Lnot_short /* We know that there's at least 64 bytes to zero and that it's safe * to overrun by 64 bytes. */ mov count, tmp1 2: stp A_l, A_l, [dst] stp A_l, A_l, [dst, #16] stp A_l, A_l, [dst, #32] subs tmp2, tmp2, #64 stp A_l, A_l, [dst, #48] add dst, dst, #64 b.ge 2b /* We've overrun a bit, so adjust dst downwards. */ add dst, dst, tmp2 1: sub count, count, zva_len_x 3: dc zva, dst add dst, dst, zva_len_x subs count, count, zva_len_x b.ge 3b ands count, count, zva_bits_x b.ne .Ltail_maybe_long ret END(memset) + +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/arm64/arm64/sigtramp.S b/sys/arm64/arm64/sigtramp.S index f1936e695f33..3f1bb42c269f 100644 --- a/sys/arm64/arm64/sigtramp.S +++ b/sys/arm64/arm64/sigtramp.S @@ -1,59 +1,62 @@ /*- * Copyright (c) 2014 The FreeBSD Foundation * * This software was developed by Andrew Turner under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
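(Editor's note, not part of the patch.) The .Lzero_by_line loop in memset above zeroes whole blocks with "dc zva" without reading them first; the block size comes from dczva_line_size, derived at boot from DCZID_EL0. A rough C equivalent of the inner loop, names illustrative:

    #include <stddef.h>
    #include <string.h>

    /* dst must already be aligned to 'line' and len a multiple of it;
       each iteration models one "dc zva, dst". */
    static void
    zero_by_line(char *dst, size_t len, size_t line)
    {
        while (len != 0) {
            memset(dst, 0, line);   /* dc zva, dst */
            dst += line;
            len -= line;
        }
    }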
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "assym.inc" +#include #include #include .section .rodata, "a", %progbits .globl sigcode .align 2 sigcode: blr x8 mov x0, sp add x0, x0, #SF_UC 1: mov x8, #SYS_sigreturn svc 0 /* sigreturn failed, exit */ mov x8, #SYS_exit svc 0 b 1b /* This may be copied to the stack, keep it 16-byte aligned */ .align 3 .size sigcode, . - sigcode esigcode: .data .align 3 .global szsigcode szsigcode: .quad esigcode - sigcode + +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/arm64/arm64/strcmp.S b/sys/arm64/arm64/strcmp.S index 0d66aae07d9e..d31576bbcf34 100644 --- a/sys/arm64/arm64/strcmp.S +++ b/sys/arm64/arm64/strcmp.S @@ -1,189 +1,192 @@ /* * strcmp - compare two strings * * Copyright (c) 2012-2022, Arm Limited. * SPDX-License-Identifier: MIT */ /* Assumptions: * * ARMv8-a, AArch64. * MTE compatible. */ +#include + #include #define L(l) .L ## l #define REP8_01 0x0101010101010101 #define REP8_7f 0x7f7f7f7f7f7f7f7f #define src1 x0 #define src2 x1 #define result x0 #define data1 x2 #define data1w w2 #define data2 x3 #define data2w w3 #define has_nul x4 #define diff x5 #define off1 x5 #define syndrome x6 #define tmp x6 #define data3 x7 #define zeroones x8 #define shift x9 #define off2 x10 /* On big-endian early bytes are at MSB and on little-endian LSB. LS_FW means shifting towards early bytes. */ #ifdef __AARCH64EB__ # define LS_FW lsl #else # define LS_FW lsr #endif /* NUL detection works on the principle that (X - 1) & (~X) & 0x80 (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and can be done in parallel across the entire word. Since carry propagation makes 0x1 bytes before a NUL byte appear NUL too in big-endian, byte-reverse the data before the NUL check. */ ENTRY (strcmp) sub off2, src2, src1 mov zeroones, REP8_01 and tmp, src1, 7 tst off2, 7 b.ne L(misaligned8) cbnz tmp, L(mutual_align) .p2align 4 L(loop_aligned): ldr data2, [src1, off2] ldr data1, [src1], 8 L(start_realigned): #ifdef __AARCH64EB__ rev tmp, data1 sub has_nul, tmp, zeroones orr tmp, tmp, REP8_7f #else sub has_nul, data1, zeroones orr tmp, data1, REP8_7f #endif bics has_nul, has_nul, tmp /* Non-zero if NUL terminator. */ ccmp data1, data2, 0, eq b.eq L(loop_aligned) #ifdef __AARCH64EB__ rev has_nul, has_nul #endif eor diff, data1, data2 orr syndrome, diff, has_nul L(end): #ifndef __AARCH64EB__ rev syndrome, syndrome rev data1, data1 rev data2, data2 #endif clz shift, syndrome /* The most-significant-non-zero bit of the syndrome marks either the first bit that is different, or the top bit of the first zero byte. Shifting left now will bring the critical information into the top bits. 
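(Editor's note, not part of the patch.) The NUL-detection identity quoted in the strcmp header above, (X - 1) & ~X & 0x80 per byte, is applied to a whole word at once using the REP8 constants, and the resulting "syndrome" word is what clz inspects. A small C sketch of both steps, illustrative only:

    #include <stdint.h>

    #define REP8_01 0x0101010101010101ULL
    #define REP8_7f 0x7f7f7f7f7f7f7f7fULL

    /* Non-zero iff some byte of x is 0x00.  Bytes after the first NUL may
       also be marked; that is harmless because only the first marked byte
       is ever consumed. */
    static uint64_t
    has_zero(uint64_t x)
    {
        return ((x - REP8_01) & ~(x | REP8_7f));
    }

    /* Non-zero where the two words differ or where data1 holds a NUL; after
       the byte reversal done on little-endian, clz on this value locates the
       first interesting byte. */
    static uint64_t
    syndrome(uint64_t data1, uint64_t data2)
    {
        return ((data1 ^ data2) | has_zero(data1));
    }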
*/ lsl data1, data1, shift lsl data2, data2, shift /* But we need to zero-extend (char is unsigned) the value and then perform a signed 32-bit subtraction. */ lsr data1, data1, 56 sub result, data1, data2, lsr 56 ret .p2align 4 L(mutual_align): /* Sources are mutually aligned, but are not currently at an alignment boundary. Round down the addresses and then mask off the bytes that precede the start point. */ bic src1, src1, 7 ldr data2, [src1, off2] ldr data1, [src1], 8 neg shift, src2, lsl 3 /* Bits to alignment -64. */ mov tmp, -1 LS_FW tmp, tmp, shift orr data1, data1, tmp orr data2, data2, tmp b L(start_realigned) L(misaligned8): /* Align SRC1 to 8 bytes and then compare 8 bytes at a time, always checking to make sure that we don't access beyond the end of SRC2. */ cbz tmp, L(src1_aligned) L(do_misaligned): ldrb data1w, [src1], 1 ldrb data2w, [src2], 1 cmp data1w, 0 ccmp data1w, data2w, 0, ne /* NZCV = 0b0000. */ b.ne L(done) tst src1, 7 b.ne L(do_misaligned) L(src1_aligned): neg shift, src2, lsl 3 bic src2, src2, 7 ldr data3, [src2], 8 #ifdef __AARCH64EB__ rev data3, data3 #endif lsr tmp, zeroones, shift orr data3, data3, tmp sub has_nul, data3, zeroones orr tmp, data3, REP8_7f bics has_nul, has_nul, tmp b.ne L(tail) sub off1, src2, src1 .p2align 4 L(loop_unaligned): ldr data3, [src1, off1] ldr data2, [src1, off2] #ifdef __AARCH64EB__ rev data3, data3 #endif sub has_nul, data3, zeroones orr tmp, data3, REP8_7f ldr data1, [src1], 8 bics has_nul, has_nul, tmp ccmp data1, data2, 0, eq b.eq L(loop_unaligned) lsl tmp, has_nul, shift #ifdef __AARCH64EB__ rev tmp, tmp #endif eor diff, data1, data2 orr syndrome, diff, tmp cbnz syndrome, L(end) L(tail): ldr data1, [src1] neg shift, shift lsr data2, data3, shift lsr has_nul, has_nul, shift #ifdef __AARCH64EB__ rev data2, data2 rev has_nul, has_nul #endif eor diff, data1, data2 orr syndrome, diff, has_nul b L(end) L(done): sub result, data1, data2 ret END (strcmp) +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/arm64/arm64/strncmp.S b/sys/arm64/arm64/strncmp.S index 595de0312678..1b475b4ce449 100644 --- a/sys/arm64/arm64/strncmp.S +++ b/sys/arm64/arm64/strncmp.S @@ -1,307 +1,310 @@ /* * strncmp - compare two strings * * Copyright (c) 2013-2022, Arm Limited. * SPDX-License-Identifier: MIT */ /* Assumptions: * * ARMv8-a, AArch64. * MTE compatible. */ +#include + #include #define L(l) .L ## l #define REP8_01 0x0101010101010101 #define REP8_7f 0x7f7f7f7f7f7f7f7f /* Parameters and result. */ #define src1 x0 #define src2 x1 #define limit x2 #define result x0 /* Internal variables. */ #define data1 x3 #define data1w w3 #define data2 x4 #define data2w w4 #define has_nul x5 #define diff x6 #define syndrome x7 #define tmp1 x8 #define tmp2 x9 #define tmp3 x10 #define zeroones x11 #define pos x12 #define mask x13 #define endloop x14 #define count mask #define offset pos #define neg_offset x15 /* Define endian dependent shift operations. On big-endian early bytes are at MSB and on little-endian LSB. LS_FW means shifting towards early bytes. LS_BK means shifting towards later bytes. 
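(Editor's note, not part of the patch.) Both strcmp above and strncmp below turn the clz result into a return value the same way: shift the interesting byte to the top of each word, then subtract the zero-extended top bytes. In C terms, a sketch with illustrative names:

    #include <stdint.h>

    /* bitpos is a multiple of 8 identifying the first differing or NUL byte,
       counted from the most significant end after any byte reversal. */
    static int
    byte_diff(uint64_t data1, uint64_t data2, unsigned bitpos)
    {
        data1 <<= bitpos;
        data2 <<= bitpos;
        return ((int)(data1 >> 56) - (int)(data2 >> 56));
    }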
*/ #ifdef __AARCH64EB__ #define LS_FW lsl #define LS_BK lsr #else #define LS_FW lsr #define LS_BK lsl #endif ENTRY (strncmp) cbz limit, L(ret0) eor tmp1, src1, src2 mov zeroones, #REP8_01 tst tmp1, #7 and count, src1, #7 b.ne L(misaligned8) cbnz count, L(mutual_align) /* NUL detection works on the principle that (X - 1) & (~X) & 0x80 (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and can be done in parallel across the entire word. */ .p2align 4 L(loop_aligned): ldr data1, [src1], #8 ldr data2, [src2], #8 L(start_realigned): subs limit, limit, #8 sub tmp1, data1, zeroones orr tmp2, data1, #REP8_7f eor diff, data1, data2 /* Non-zero if differences found. */ csinv endloop, diff, xzr, hi /* Last Dword or differences. */ bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */ ccmp endloop, #0, #0, eq b.eq L(loop_aligned) /* End of main loop */ L(full_check): #ifndef __AARCH64EB__ orr syndrome, diff, has_nul add limit, limit, 8 /* Rewind limit to before last subs. */ L(syndrome_check): /* Limit was reached. Check if the NUL byte or the difference is before the limit. */ rev syndrome, syndrome rev data1, data1 clz pos, syndrome rev data2, data2 lsl data1, data1, pos cmp limit, pos, lsr #3 lsl data2, data2, pos /* But we need to zero-extend (char is unsigned) the value and then perform a signed 32-bit subtraction. */ lsr data1, data1, #56 sub result, data1, data2, lsr #56 csel result, result, xzr, hi ret #else /* Not reached the limit, must have found the end or a diff. */ tbz limit, #63, L(not_limit) add tmp1, limit, 8 cbz limit, L(not_limit) lsl limit, tmp1, #3 /* Bits -> bytes. */ mov mask, #~0 lsr mask, mask, limit bic data1, data1, mask bic data2, data2, mask /* Make sure that the NUL byte is marked in the syndrome. */ orr has_nul, has_nul, mask L(not_limit): /* For big-endian we cannot use the trick with the syndrome value as carry-propagation can corrupt the upper bits if the trailing bytes in the string contain 0x01. */ /* However, if there is no NUL byte in the dword, we can generate the result directly. We can't just subtract the bytes as the MSB might be significant. */ cbnz has_nul, 1f cmp data1, data2 cset result, ne cneg result, result, lo ret 1: /* Re-compute the NUL-byte detection, using a byte-reversed value. */ rev tmp3, data1 sub tmp1, tmp3, zeroones orr tmp2, tmp3, #REP8_7f bic has_nul, tmp1, tmp2 rev has_nul, has_nul orr syndrome, diff, has_nul clz pos, syndrome /* The most-significant-non-zero bit of the syndrome marks either the first bit that is different, or the top bit of the first zero byte. Shifting left now will bring the critical information into the top bits. */ L(end_quick): lsl data1, data1, pos lsl data2, data2, pos /* But we need to zero-extend (char is unsigned) the value and then perform a signed 32-bit subtraction. */ lsr data1, data1, #56 sub result, data1, data2, lsr #56 ret #endif L(mutual_align): /* Sources are mutually aligned, but are not currently at an alignment boundary. Round down the addresses and then mask off the bytes that precede the start point. We also need to adjust the limit calculations, but without overflowing if the limit is near ULONG_MAX. */ bic src1, src1, #7 bic src2, src2, #7 ldr data1, [src1], #8 neg tmp3, count, lsl #3 /* 64 - bits(bytes beyond align). */ ldr data2, [src2], #8 mov tmp2, #~0 LS_FW tmp2, tmp2, tmp3 /* Shift (count & 63). */ /* Adjust the limit and ensure it doesn't overflow. 
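(Editor's note, not part of the patch.) The limit adjustment that the mutual_align comment announces is a saturating add: the alignment bytes are added back to the limit, clamping at ULONG_MAX so a limit near the top of the range cannot wrap. A C sketch of the adds/csinv pair, illustrative only:

    #include <limits.h>

    static unsigned long
    adjust_limit(unsigned long limit, unsigned long count)
    {
        unsigned long sum = limit + count;

        return ((sum < limit) ? ULONG_MAX : sum);   /* csinv on carry */
    }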
*/ adds limit, limit, count csinv limit, limit, xzr, lo orr data1, data1, tmp2 orr data2, data2, tmp2 b L(start_realigned) .p2align 4 /* Don't bother with dwords for up to 16 bytes. */ L(misaligned8): cmp limit, #16 b.hs L(try_misaligned_words) L(byte_loop): /* Perhaps we can do better than this. */ ldrb data1w, [src1], #1 ldrb data2w, [src2], #1 subs limit, limit, #1 ccmp data1w, #1, #0, hi /* NZCV = 0b0000. */ ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */ b.eq L(byte_loop) L(done): sub result, data1, data2 ret /* Align the SRC1 to a dword by doing a bytewise compare and then do the dword loop. */ L(try_misaligned_words): cbz count, L(src1_aligned) neg count, count and count, count, #7 sub limit, limit, count L(page_end_loop): ldrb data1w, [src1], #1 ldrb data2w, [src2], #1 cmp data1w, #1 ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */ b.ne L(done) subs count, count, #1 b.hi L(page_end_loop) /* The following diagram explains the comparison of misaligned strings. The bytes are shown in natural order. For little-endian, it is reversed in the registers. The "x" bytes are before the string. The "|" separates data that is loaded at one time. src1 | a a a a a a a a | b b b c c c c c | . . . src2 | x x x x x a a a a a a a a b b b | c c c c c . . . After shifting in each step, the data looks like this: STEP_A STEP_B STEP_C data1 a a a a a a a a b b b c c c c c b b b c c c c c data2 a a a a a a a a b b b 0 0 0 0 0 0 0 0 c c c c c The bytes with "0" are eliminated from the syndrome via mask. Align SRC2 down to 16 bytes. This way we can read 16 bytes at a time from SRC2. The comparison happens in 3 steps. After each step the loop can exit, or read from SRC1 or SRC2. */ L(src1_aligned): /* Calculate offset from 8 byte alignment to string start in bits. No need to mask offset since shifts are ignoring upper bits. */ lsl offset, src2, #3 bic src2, src2, #0xf mov mask, -1 neg neg_offset, offset ldr data1, [src1], #8 ldp tmp1, tmp2, [src2], #16 LS_BK mask, mask, neg_offset and neg_offset, neg_offset, #63 /* Need actual value for cmp later. */ /* Skip the first compare if data in tmp1 is irrelevant. */ tbnz offset, 6, L(misaligned_mid_loop) L(loop_misaligned): /* STEP_A: Compare full 8 bytes when there is enough data from SRC2.*/ LS_FW data2, tmp1, offset LS_BK tmp1, tmp2, neg_offset subs limit, limit, #8 orr data2, data2, tmp1 /* 8 bytes from SRC2 combined from two regs.*/ sub has_nul, data1, zeroones eor diff, data1, data2 /* Non-zero if differences found. */ orr tmp3, data1, #REP8_7f csinv endloop, diff, xzr, hi /* If limit, set to all ones. */ bic has_nul, has_nul, tmp3 /* Non-zero if NUL byte found in SRC1. */ orr tmp3, endloop, has_nul cbnz tmp3, L(full_check) ldr data1, [src1], #8 L(misaligned_mid_loop): /* STEP_B: Compare first part of data1 to second part of tmp2. */ LS_FW data2, tmp2, offset #ifdef __AARCH64EB__ /* For big-endian we do a byte reverse to avoid carry-propagation problem described above. This way we can reuse the has_nul in the next step and also use syndrome value trick at the end. */ rev tmp3, data1 #define data1_fixed tmp3 #else #define data1_fixed data1 #endif sub has_nul, data1_fixed, zeroones orr tmp3, data1_fixed, #REP8_7f eor diff, data2, data1 /* Non-zero if differences found. */ bic has_nul, has_nul, tmp3 /* Non-zero if NUL terminator. */ #ifdef __AARCH64EB__ rev has_nul, has_nul #endif cmp limit, neg_offset, lsr #3 orr syndrome, diff, has_nul bic syndrome, syndrome, mask /* Ignore later bytes. */ csinv tmp3, syndrome, xzr, hi /* If limit, set to all ones. 
*/ cbnz tmp3, L(syndrome_check) /* STEP_C: Compare second part of data1 to first part of tmp1. */ ldp tmp1, tmp2, [src2], #16 cmp limit, #8 LS_BK data2, tmp1, neg_offset eor diff, data2, data1 /* Non-zero if differences found. */ orr syndrome, diff, has_nul and syndrome, syndrome, mask /* Ignore earlier bytes. */ csinv tmp3, syndrome, xzr, hi /* If limit, set to all ones. */ cbnz tmp3, L(syndrome_check) ldr data1, [src1], #8 sub limit, limit, #8 b L(loop_misaligned) #ifdef __AARCH64EB__ L(syndrome_check): clz pos, syndrome cmp pos, limit, lsl #3 b.lo L(end_quick) #endif L(ret0): mov result, #0 ret END(strncmp) +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/arm64/arm64/support.S b/sys/arm64/arm64/support.S index bb93cfd521e1..2d067c7f7730 100644 --- a/sys/arm64/arm64/support.S +++ b/sys/arm64/arm64/support.S @@ -1,389 +1,393 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2014-2015 The FreeBSD Foundation * All rights reserved. * * Portions of this software were developed by Andrew Turner * under sponsorship from the FreeBSD Foundation * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ +#include + #include #include #include #include #include "assym.inc" .macro check_user_access user_arg, limit, bad_addr_func ldr x7, =(\limit) cmp x\user_arg, x7 b.cs \bad_addr_func .endm /* * One of the fu* or su* functions failed, return -1. 
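(Editor's note, not part of the patch.) Taken together, the check_user_access macro above and the fsu_fault handler below give every fu* and su* helper the same shape: reject addresses at or beyond the user VA limit before touching them, arm a fault handler around the single unprivileged access, and report -1 on any fault. A C-level sketch of that shape; the ldtr/sttr unprivileged accesses and the pcb fault-handler plumbing are not modelled, and the VM_MAXUSER_ADDRESS value below is a placeholder:

    #include <stdint.h>

    #ifndef VM_MAXUSER_ADDRESS
    #define VM_MAXUSER_ADDRESS ((uintptr_t)1 << 48)    /* placeholder value */
    #endif

    static long
    fueword_sketch(volatile const void *uaddr, int64_t *valp)
    {
        if ((uintptr_t)uaddr >= VM_MAXUSER_ADDRESS - 7)
            return (-1);            /* check_user_access: out of range */
        /* SET_FAULT_HANDLER(fsu_fault): a fault in the access below makes
           the helper return -1 instead of panicking. */
        *valp = *(const volatile int64_t *)uaddr;   /* ldtr in the real code */
        /* SET_FAULT_HANDLER(NULL) */
        return (0);
    }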
*/ ENTRY(fsu_fault) SET_FAULT_HANDLER(xzr, x1) /* Reset the handler function */ EXIT_USER_ACCESS_CHECK(w0, x1) fsu_fault_nopcb: mov x0, #-1 ret END(fsu_fault) /* * int swapueword8_llsc(volatile uint8_t *, uint8_t *) */ ENTRY(swapueword8_llsc) check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x6, x4) /* And set it */ ENTER_USER_ACCESS(w6, x4) ldrb w7, [x1] ldxrb w2, [x0] stxrb w3, w7, [x0] cbnz w3, 1f strb w2, [x1] /* Stash old value in *val */ 1: EXIT_USER_ACCESS(w6) SET_FAULT_HANDLER(xzr, x6) mov w0, w3 ret END(swapueword8_llsc) /* * int swapueword8_lse(volatile uint8_t *, uint8_t *) */ ENTRY(swapueword8_lse) check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x6, x4) /* And set it */ ENTER_USER_ACCESS(w6, x4) ldrb w7, [x1] .arch_extension lse swpb w7, w2, [x0] .arch_extension nolse strb w2, [x1] /* Stash old value in *val */ EXIT_USER_ACCESS(w6) SET_FAULT_HANDLER(xzr, x6) mov w0, #0 ret END(swapueword8_lse) /* * int swapueword32_llsc(volatile uint32_t *, uint32_t *) */ ENTRY(swapueword32_llsc) check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x6, x4) /* And set it */ ENTER_USER_ACCESS(w6, x4) ldr w7, [x1] ldxr w2, [x0] /* Stash the old value in w2 */ stxr w3, w7, [x0] /* Store new value */ cbnz w3, 1f str w2, [x1] /* Stash old value in *val */ 1: EXIT_USER_ACCESS(w6) SET_FAULT_HANDLER(xzr, x6) mov w0, w3 ret END(swapueword32_llsc) /* * int swapueword32_lse(volatile uint32_t *, uint32_t *) */ ENTRY(swapueword32_lse) check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x6, x4) /* And set it */ ENTER_USER_ACCESS(w6, x4) ldr w7, [x1] .arch_extension lse swp w7, w2, [x0] .arch_extension nolse str w2, [x1] /* Stash old value in *val */ EXIT_USER_ACCESS(w6) SET_FAULT_HANDLER(xzr, x6) mov w0, #0 ret END(swapueword32_llsc) /* * int casueword32_llsc(volatile uint32_t *, uint32_t, uint32_t *, uint32_t) */ ENTRY(casueword32_llsc) check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ mov w5, #1 SET_FAULT_HANDLER(x6, x4) /* And set it */ ENTER_USER_ACCESS(w6, x4) ldxr w4, [x0] /* Load-exclusive the data */ cmp w4, w1 /* Compare */ b.ne 1f /* Not equal, exit */ stxr w5, w3, [x0] /* Store the new data */ 1: EXIT_USER_ACCESS(w6) SET_FAULT_HANDLER(xzr, x6) /* Reset the fault handler */ str w4, [x2] /* Store the read data */ mov w0, w5 /* Result same as store status */ ret /* Return */ END(casueword32_llsc) /* * int casueword32_lse(volatile uint32_t *, uint32_t, uint32_t *, uint32_t) */ ENTRY(casueword32_lse) check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x6, x4) /* And set it */ ENTER_USER_ACCESS(w6, x4) mov w7, w1 /* Back up the compare value */ .arch_extension lse cas w1, w3, [x0] /* Compare and Swap */ .arch_extension nolse cmp w1, w7 /* Check if successful */ cset w0, ne /* Return 0 on success, 1 on failure */ EXIT_USER_ACCESS(w6) SET_FAULT_HANDLER(xzr, x6) /* Reset the fault handler */ str w1, [x2] /* Store the read data */ ret /* Return */ END(casueword32_lse) /* * int casueword_llsc(volatile u_long *, u_long, u_long *, u_long) */ ENTRY(casueword_llsc) check_user_access 0, (VM_MAXUSER_ADDRESS-7), fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ mov w5, #1 
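(Editor's note, not part of the patch.) The casueword32 variants above implement a compare-and-swap on a user word. Their contract, roughly, in C; fault handling and the user/kernel access distinction are omitted, and the names are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns 0 when the swap happened, 1 when the comparison (or an LL/SC
       store) did not succeed, and -1 on a user-memory fault (not modelled).
       The value observed at *uaddr is always written back through *oldp. */
    static int
    casueword32_sketch(volatile uint32_t *uaddr, uint32_t cmp, uint32_t *oldp,
        uint32_t newv)
    {
        uint32_t old = cmp;
        bool ok;

        ok = __atomic_compare_exchange_n(uaddr, &old, newv, true,
            __ATOMIC_RELAXED, __ATOMIC_RELAXED);
        *oldp = old;
        return (ok ? 0 : 1);
    }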
SET_FAULT_HANDLER(x6, x4) /* And set it */ ENTER_USER_ACCESS(w6, x4) ldxr x4, [x0] /* Load-exclusive the data */ cmp x4, x1 /* Compare */ b.ne 1f /* Not equal, exit */ stxr w5, x3, [x0] /* Store the new data */ 1: EXIT_USER_ACCESS(w6) SET_FAULT_HANDLER(xzr, x6) /* Reset the fault handler */ str x4, [x2] /* Store the read data */ mov w0, w5 /* Result same as store status */ ret /* Return */ END(casueword_llsc) /* * int casueword_lse(volatile u_long *, u_long, u_long *, u_long) */ ENTRY(casueword_lse) check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb adr x6, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x6, x4) /* And set it */ ENTER_USER_ACCESS(w6, x4) mov x7, x1 /* Back up the compare value */ .arch_extension lse cas x1, x3, [x0] /* Compare and Swap */ .arch_extension nolse cmp x1, x7 /* Check if successful */ cset w0, ne /* Return 0 on success, 1 on failure */ EXIT_USER_ACCESS(w6) SET_FAULT_HANDLER(xzr, x6) /* Reset the fault handler */ str x1, [x2] /* Store the read data */ ret /* Return */ END(casueword_lse) .macro fsudata insn, ret_reg, user_arg adr x7, fsu_fault /* Load the fault handler */ SET_FAULT_HANDLER(x7, x6) /* And set it */ \insn \ret_reg, [x\user_arg] /* Try accessing the data */ SET_FAULT_HANDLER(xzr, x6) /* Reset the fault handler */ .endm /* * int fubyte(volatile const void *) */ ENTRY(fubyte) check_user_access 0, (VM_MAXUSER_ADDRESS), fsu_fault_nopcb fsudata ldtrb, w0, 0 ret /* Return */ END(fubyte) /* * int fuword(volatile const void *) */ ENTRY(fuword16) check_user_access 0, (VM_MAXUSER_ADDRESS-1), fsu_fault_nopcb fsudata ldtrh, w0, 0 ret /* Return */ END(fuword16) /* * int32_t fueword32(volatile const void *, int32_t *) */ ENTRY(fueword32) check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb fsudata ldtr, w0, 0 str w0, [x1] /* Save the data in kernel space */ mov w0, #0 /* Success */ ret /* Return */ END(fueword32) /* * long fueword(volatile const void *, int64_t *) * int64_t fueword64(volatile const void *, int64_t *) */ EENTRY(fueword64) ENTRY(fueword) check_user_access 0, (VM_MAXUSER_ADDRESS-7), fsu_fault_nopcb fsudata ldtr, x0, 0 str x0, [x1] /* Save the data in kernel space */ mov x0, #0 /* Success */ ret /* Return */ END(fueword) EEND(fueword64) /* * int subyte(volatile void *, int) */ ENTRY(subyte) check_user_access 0, (VM_MAXUSER_ADDRESS), fsu_fault_nopcb fsudata sttrb, w1, 0 mov x0, #0 /* Success */ ret /* Return */ END(subyte) /* * int suword16(volatile void *, int) */ ENTRY(suword16) check_user_access 0, (VM_MAXUSER_ADDRESS-1), fsu_fault_nopcb fsudata sttrh, w1, 0 mov x0, #0 /* Success */ ret /* Return */ END(suword16) /* * int suword32(volatile void *, int) */ ENTRY(suword32) check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb fsudata sttr, w1, 0 mov x0, #0 /* Success */ ret /* Return */ END(suword32) /* * int suword(volatile void *, long) */ EENTRY(suword64) ENTRY(suword) check_user_access 0, (VM_MAXUSER_ADDRESS-7), fsu_fault_nopcb fsudata sttr, x1, 0 mov x0, #0 /* Success */ ret /* Return */ END(suword) EEND(suword64) ENTRY(setjmp) /* Store the stack pointer */ mov x8, sp str x8, [x0], #8 /* Store the general purpose registers and lr */ stp x19, x20, [x0], #16 stp x21, x22, [x0], #16 stp x23, x24, [x0], #16 stp x25, x26, [x0], #16 stp x27, x28, [x0], #16 stp x29, lr, [x0], #16 /* Return value */ mov x0, #0 ret END(setjmp) ENTRY(longjmp) /* Restore the stack pointer */ ldr x8, [x0], #8 mov sp, x8 /* Restore the general purpose registers and lr */ ldp x19, x20, [x0], #16 ldp x21, x22, [x0], #16 ldp x23, x24, [x0], #16 
ldp x25, x26, [x0], #16 ldp x27, x28, [x0], #16 ldp x29, lr, [x0], #16 /* Load the return value */ mov x0, x1 ret END(longjmp) /* * pagezero, simple implementation */ ENTRY(pagezero_simple) add x1, x0, #PAGE_SIZE 1: stp xzr, xzr, [x0], #0x10 stp xzr, xzr, [x0], #0x10 stp xzr, xzr, [x0], #0x10 stp xzr, xzr, [x0], #0x10 cmp x0, x1 b.ne 1b ret END(pagezero_simple) /* * pagezero, cache assisted */ ENTRY(pagezero_cache) add x1, x0, #PAGE_SIZE adrp x2, dczva_line_size ldr x2, [x2, :lo12:dczva_line_size] 1: dc zva, x0 add x0, x0, x2 cmp x0, x1 b.ne 1b ret END(pagezero_cache) + +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/arm64/arm64/swtch.S b/sys/arm64/arm64/swtch.S index 9c43de3a9eae..ca00d473fd47 100644 --- a/sys/arm64/arm64/swtch.S +++ b/sys/arm64/arm64/swtch.S @@ -1,279 +1,282 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include "assym.inc" #include "opt_kstack_pages.h" #include "opt_sched.h" +#include + #include #include .macro clear_step_flag pcbflags, tmp tbz \pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f mrs \tmp, mdscr_el1 bic \tmp, \tmp, #MDSCR_SS msr mdscr_el1, \tmp isb 999: .endm .macro set_step_flag pcbflags, tmp tbz \pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f mrs \tmp, mdscr_el1 orr \tmp, \tmp, #MDSCR_SS msr mdscr_el1, \tmp isb 999: .endm /* * void cpu_throw(struct thread *old, struct thread *new) */ ENTRY(cpu_throw) /* Of old == NULL skip disabling stepping */ cbz x0, 1f /* If we were single stepping, disable it */ ldr x4, [x0, #TD_PCB] ldr w5, [x4, #PCB_FLAGS] clear_step_flag w5, x6 1: #ifdef VFP /* Backup the new thread pointer around a call to C code */ mov x19, x1 bl vfp_discard mov x0, x19 #else mov x0, x1 #endif /* This returns the thread pointer so no need to save it */ bl ptrauth_switch #ifdef PERTHREAD_SSP mov x19, x0 #endif /* This returns the thread pcb */ bl pmap_switch mov x4, x0 #ifdef PERTHREAD_SSP /* Update the per-thread stack canary pointer. 
*/ add x19, x19, #(TD_MD_CANARY) msr sp_el0, x19 #endif /* If we are single stepping, enable it */ ldr w5, [x4, #PCB_FLAGS] set_step_flag w5, x6 /* Restore the registers */ ldp x5, x6, [x4, #PCB_SP] mov sp, x5 msr tpidr_el0, x6 ldr x6, [x4, #PCB_TPIDRRO] msr tpidrro_el0, x6 ldp x19, x20, [x4, #PCB_REGS + (PCB_X19 + 0) * 8] ldp x21, x22, [x4, #PCB_REGS + (PCB_X19 + 2) * 8] ldp x23, x24, [x4, #PCB_REGS + (PCB_X19 + 4) * 8] ldp x25, x26, [x4, #PCB_REGS + (PCB_X19 + 6) * 8] ldp x27, x28, [x4, #PCB_REGS + (PCB_X19 + 8) * 8] ldp x29, lr, [x4, #PCB_REGS + (PCB_X19 + 10) * 8] ret END(cpu_throw) /* * void cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx) * * x0 = old * x1 = new * x2 = mtx * x3 to x7, x16 and x17 are caller saved */ ENTRY(cpu_switch) /* * Save the old context. */ ldr x4, [x0, #TD_PCB] /* Store the callee-saved registers */ stp x19, x20, [x4, #PCB_REGS + (PCB_X19 + 0) * 8] stp x21, x22, [x4, #PCB_REGS + (PCB_X19 + 2) * 8] stp x23, x24, [x4, #PCB_REGS + (PCB_X19 + 4) * 8] stp x25, x26, [x4, #PCB_REGS + (PCB_X19 + 6) * 8] stp x27, x28, [x4, #PCB_REGS + (PCB_X19 + 8) * 8] stp x29, lr, [x4, #PCB_REGS + (PCB_X19 + 10) * 8] /* And the old stack pointer */ mov x5, sp mrs x6, tpidrro_el0 str x6, [x4, #PCB_TPIDRRO] mrs x6, tpidr_el0 stp x5, x6, [x4, #PCB_SP] /* If we were single stepping, disable it */ ldr w5, [x4, #PCB_FLAGS] clear_step_flag w5, x6 mov x19, x0 mov x20, x1 mov x21, x2 #ifdef VFP /* Load the pcb address */ mov x1, x4 bl vfp_save_state mov x0, x20 #else mov x0, x1 #endif /* This returns the thread pointer so no need to save it */ bl ptrauth_switch /* This returns the thread pcb */ bl pmap_switch /* Move the new pcb out of the way */ mov x4, x0 mov x2, x21 mov x1, x20 mov x0, x19 #ifdef PERTHREAD_SSP /* Update the per-thread stack canary pointer. */ add x20, x20, #(TD_MD_CANARY) msr sp_el0, x20 #endif /* * Release the old thread. */ stlr x2, [x0, #TD_LOCK] #if defined(SCHED_ULE) && defined(SMP) /* Spin if TD_LOCK points to a blocked_lock */ ldr x2, =_C_LABEL(blocked_lock) 1: ldar x3, [x1, #TD_LOCK] cmp x3, x2 b.eq 1b #endif /* If we are single stepping, enable it */ ldr w5, [x4, #PCB_FLAGS] set_step_flag w5, x6 /* Restore the registers */ ldp x5, x6, [x4, #PCB_SP] mov sp, x5 msr tpidr_el0, x6 ldr x6, [x4, #PCB_TPIDRRO] msr tpidrro_el0, x6 ldp x19, x20, [x4, #PCB_REGS + (PCB_X19 + 0) * 8] ldp x21, x22, [x4, #PCB_REGS + (PCB_X19 + 2) * 8] ldp x23, x24, [x4, #PCB_REGS + (PCB_X19 + 4) * 8] ldp x25, x26, [x4, #PCB_REGS + (PCB_X19 + 6) * 8] ldp x27, x28, [x4, #PCB_REGS + (PCB_X19 + 8) * 8] ldp x29, lr, [x4, #PCB_REGS + (PCB_X19 + 10) * 8] ret END(cpu_switch) ENTRY(fork_trampoline) mov x0, x19 mov x1, x20 mov x2, sp mov fp, #0 /* Stack traceback stops here. */ bl _C_LABEL(fork_exit) /* * Disable interrupts as we are setting userspace specific * state that we won't handle correctly in an interrupt while * in the kernel. 
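(Editor's note, not part of the patch.) The thread-lock hand-off in cpu_switch above is a release/acquire pairing: the old thread is released with a store-release (stlr) and, in the SCHED_ULE plus SMP case, the switch spins with load-acquire (ldar) until the new thread's lock no longer points at blocked_lock. A C11-atomics sketch of the same pattern; the types are illustrative, not the kernel's:

    #include <stdatomic.h>

    struct lock_object;                 /* opaque, illustrative */
    struct thread { _Atomic(struct lock_object *) td_lock; };
    extern struct lock_object blocked_lock;

    static void
    switch_handoff(struct thread *oldtd, struct thread *newtd,
        struct lock_object *mtx)
    {
        /* stlr: release the old thread to the scheduler. */
        atomic_store_explicit(&oldtd->td_lock, mtx, memory_order_release);
        /* ldar loop: wait until another CPU has finished with newtd. */
        while (atomic_load_explicit(&newtd->td_lock, memory_order_acquire) ==
            &blocked_lock)
            ;
    }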
*/ msr daifset, #(DAIF_D | DAIF_INTR) ldr x0, [x18, #PC_CURTHREAD] bl ptrauth_enter_el0 /* Restore sp, lr, elr, and spsr */ ldp x18, lr, [sp, #TF_SP] ldp x10, x11, [sp, #TF_ELR] msr sp_el0, x18 msr spsr_el1, x11 msr elr_el1, x10 /* Restore the CPU registers */ ldp x0, x1, [sp, #TF_X + 0 * 8] ldp x2, x3, [sp, #TF_X + 2 * 8] ldp x4, x5, [sp, #TF_X + 4 * 8] ldp x6, x7, [sp, #TF_X + 6 * 8] ldp x8, x9, [sp, #TF_X + 8 * 8] ldp x10, x11, [sp, #TF_X + 10 * 8] ldp x12, x13, [sp, #TF_X + 12 * 8] ldp x14, x15, [sp, #TF_X + 14 * 8] ldp x16, x17, [sp, #TF_X + 16 * 8] ldp x18, x19, [sp, #TF_X + 18 * 8] ldp x20, x21, [sp, #TF_X + 20 * 8] ldp x22, x23, [sp, #TF_X + 22 * 8] ldp x24, x25, [sp, #TF_X + 24 * 8] ldp x26, x27, [sp, #TF_X + 26 * 8] ldp x28, x29, [sp, #TF_X + 28 * 8] /* * No need for interrupts reenabling since PSR * will be set to the desired value anyway. */ ERET END(fork_trampoline) ENTRY(savectx) /* Store the callee-saved registers */ stp x19, x20, [x0, #PCB_REGS + (PCB_X19 + 0) * 8] stp x21, x22, [x0, #PCB_REGS + (PCB_X19 + 2) * 8] stp x23, x24, [x0, #PCB_REGS + (PCB_X19 + 4) * 8] stp x25, x26, [x0, #PCB_REGS + (PCB_X19 + 6) * 8] stp x27, x28, [x0, #PCB_REGS + (PCB_X19 + 8) * 8] stp x29, lr, [x0, #PCB_REGS + (PCB_X19 + 10) * 8] /* And the old stack pointer */ mov x5, sp mrs x6, tpidrro_el0 str x6, [x0, #PCB_TPIDRRO] mrs x6, tpidr_el0 stp x5, x6, [x0, #PCB_SP] /* Store the VFP registers */ #ifdef VFP mov x28, lr bl vfp_save_state_savectx mov lr, x28 #endif ret END(savectx) +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/arm64/vmm/vmm_call.S b/sys/arm64/vmm/vmm_call.S index fc28e3f173eb..8caf0465f938 100644 --- a/sys/arm64/vmm/vmm_call.S +++ b/sys/arm64/vmm/vmm_call.S @@ -1,39 +1,42 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (C) 2017 Alexandru Elisei * * This software was developed by Alexandru Elisei under sponsorship * from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ +#include #include .text ENTRY(vmm_call_hyp) hvc #0 ret END(vmm_call_hyp) + +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/arm64/vmm/vmm_hyp_el2.S b/sys/arm64/vmm/vmm_hyp_el2.S index 7012e238c9c8..0ba040ee7bad 100644 --- a/sys/arm64/vmm/vmm_hyp_el2.S +++ b/sys/arm64/vmm/vmm_hyp_el2.S @@ -1,39 +1,44 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Andrew Turner * * This work was supported by Innovate UK project 105694, "Digital Security * by Design (DSbD) Technology Platform Prototype". * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ +#include + +#include #include .section .rodata .align PAGE_SHIFT .globl vmm_hyp_code vmm_hyp_code: .incbin "vmm_hyp_blob.bin" .globl vmm_hyp_code_end vmm_hyp_code_end: + +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/cddl/dev/dtrace/aarch64/dtrace_asm.S b/sys/cddl/dev/dtrace/aarch64/dtrace_asm.S index 3984d12bf67b..a934733251a3 100644 --- a/sys/cddl/dev/dtrace/aarch64/dtrace_asm.S +++ b/sys/cddl/dev/dtrace/aarch64/dtrace_asm.S @@ -1,174 +1,177 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License, Version 1.0 only * (the "License"). You may not use this file except in compliance * with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright 2004 Sun Microsystems, Inc. All rights reserved. * Use is subject to license terms. 
*/ #define _ASM #define _LOCORE #include #include +#include #include #include #include "assym.inc" /* void dtrace_membar_producer(void) */ ENTRY(dtrace_membar_producer) RET END(dtrace_membar_producer) /* void dtrace_membar_consumer(void) */ ENTRY(dtrace_membar_consumer) RET END(dtrace_membar_consumer) /* dtrace_icookie_t dtrace_interrupt_disable(void) */ ENTRY(dtrace_interrupt_disable) mrs x0, daif msr daifset, #2 RET END(dtrace_interrupt_disable) /* void dtrace_interrupt_enable(dtrace_icookie_t cookie) */ ENTRY(dtrace_interrupt_enable) msr daif, x0 RET END(dtrace_interrupt_enable) /* uint8_t dtrace_fuword8_nocheck(void *addr) */ ENTRY(dtrace_fuword8_nocheck) ldtrb w0, [x0] RET END(dtrace_fuword8_nocheck) /* uint16_t dtrace_fuword16_nocheck(void *addr) */ ENTRY(dtrace_fuword16_nocheck) ldtrh w0, [x0] RET END(dtrace_fuword16_nocheck) /* uint32_t dtrace_fuword32_nocheck(void *addr) */ ENTRY(dtrace_fuword32_nocheck) ldtr w0, [x0] RET END(dtrace_fuword32_nocheck) /* uint64_t dtrace_fuword64_nocheck(void *addr) */ ENTRY(dtrace_fuword64_nocheck) ldtr x0, [x0] RET END(dtrace_fuword64_nocheck) /* void dtrace_copy(uintptr_t uaddr, uintptr_t kaddr, size_t size) */ ENTRY(dtrace_copy) cbz x2, 2f /* If len == 0 then skip loop */ 1: ldtrb w4, [x0] /* Load from uaddr */ add x0, x0, #1 strb w4, [x1], #1 /* Store in kaddr */ sub x2, x2, #1 /* len-- */ cbnz x2, 1b 2: RET END(dtrace_copy) /* void dtrace_copystr(uintptr_t uaddr, uintptr_t kaddr, size_t size, volatile uint16_t *flags) XXX: Check for flags? */ ENTRY(dtrace_copystr) cbz x2, 2f /* If len == 0 then skip loop */ 1: ldtrb w4, [x0] /* Load from uaddr */ add x0, x0, #1 strb w4, [x1], #1 /* Store in kaddr */ cbz w4, 2f /* If == 0 then break */ sub x2, x2, #1 /* len-- */ cbnz x2, 1b 2: RET END(dtrace_copystr) /* uintptr_t dtrace_caller(int aframes) */ ENTRY(dtrace_caller) mov x0, #-1 RET END(dtrace_caller) /* uint32_t dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new) */ ENTRY(dtrace_cas32) 1: ldxr w3, [x0] /* Load target */ cmp w3, w1 /* Check if *target == cmp */ bne 2f /* No, return */ stxr w12, w2, [x0] /* Store new to target */ cbnz w12, 1b /* Try again if store not succeed */ 2: mov w0, w3 /* Return the value loaded from target */ RET END(dtrace_cas32) /* void * dtrace_casptr(volatile void *target, volatile void *cmp, volatile void *new) */ ENTRY(dtrace_casptr) 1: ldxr x3, [x0] /* Load target */ cmp x3, x1 /* Check if *target == cmp */ bne 2f /* No, return */ stxr w12, x2, [x0] /* Store new to target */ cbnz w12, 1b /* Try again if store not succeed */ 2: mov x0, x3 /* Return the value loaded from target */ RET END(dtrace_casptr) + +GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL) diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64 index 8139a7af8ed3..26f9eaf193af 100644 --- a/sys/conf/files.arm64 +++ b/sys/conf/files.arm64 @@ -1,750 +1,750 @@ ## ## Kernel ## kern/msi_if.m optional intrng kern/pic_if.m optional intrng kern/subr_devmap.c standard kern/subr_intr.c optional intrng kern/subr_physmem.c standard libkern/strlen.c standard libkern/arm64/crc32c_armv8.S standard arm/arm/generic_timer.c standard arm/arm/gic.c standard arm/arm/gic_acpi.c optional acpi arm/arm/gic_fdt.c optional fdt arm/arm/gic_if.m standard arm/arm/pmu.c standard arm/arm/pmu_acpi.c optional acpi arm/arm/pmu_fdt.c optional fdt arm64/acpica/acpi_iort.c optional acpi arm64/acpica/acpi_machdep.c optional acpi arm64/acpica/OsdEnvironment.c optional acpi arm64/acpica/acpi_wakeup.c optional acpi arm64/acpica/pci_cfgreg.c optional acpi pci 
arm64/arm64/autoconf.c standard arm64/arm64/bus_machdep.c standard arm64/arm64/bus_space_asm.S standard arm64/arm64/busdma_bounce.c standard arm64/arm64/busdma_machdep.c standard arm64/arm64/clock.c standard arm64/arm64/copyinout.S standard arm64/arm64/cpu_errata.c standard arm64/arm64/cpufunc_asm.S standard arm64/arm64/db_disasm.c optional ddb arm64/arm64/db_interface.c optional ddb arm64/arm64/db_trace.c optional ddb arm64/arm64/debug_monitor.c standard arm64/arm64/disassem.c optional ddb arm64/arm64/dump_machdep.c standard arm64/arm64/efirt_machdep.c optional efirt arm64/arm64/elf32_machdep.c optional compat_freebsd32 arm64/arm64/elf_machdep.c standard arm64/arm64/exception.S standard arm64/arm64/exec_machdep.c standard arm64/arm64/freebsd32_machdep.c optional compat_freebsd32 arm64/arm64/gdb_machdep.c optional gdb arm64/arm64/gicv3_its.c optional intrng fdt arm64/arm64/gic_v3.c standard arm64/arm64/gic_v3_acpi.c optional acpi arm64/arm64/gic_v3_fdt.c optional fdt arm64/arm64/hyp_stub.S standard arm64/arm64/identcpu.c standard arm64/arm64/locore.S standard no-obj arm64/arm64/machdep.c standard arm64/arm64/machdep_boot.c standard arm64/arm64/mem.c standard arm64/arm64/memcmp.S standard arm64/arm64/memcpy.S standard arm64/arm64/memset.S standard arm64/arm64/minidump_machdep.c standard arm64/arm64/mp_machdep.c optional smp arm64/arm64/nexus.c standard arm64/arm64/ofw_machdep.c optional fdt arm64/arm64/pl031_rtc.c optional fdt pl031 arm64/arm64/ptrauth.c standard \ - compile-with "${NORMAL_C:N-mbranch-protection*}" + compile-with "${NORMAL_C:N-mbranch-protection*} -mbranch-protection=bti" arm64/arm64/pmap.c standard arm64/arm64/ptrace_machdep.c standard arm64/arm64/sigtramp.S standard arm64/arm64/stack_machdep.c optional ddb | stack arm64/arm64/strcmp.S standard arm64/arm64/strncmp.S standard arm64/arm64/support_ifunc.c standard arm64/arm64/support.S standard arm64/arm64/swtch.S standard arm64/arm64/sys_machdep.c standard arm64/arm64/trap.c standard arm64/arm64/uio_machdep.c standard arm64/arm64/undefined.c standard arm64/arm64/unwind.c optional ddb | kdtrace_hooks | stack \ compile-with "${NORMAL_C:N-fsanitize*:N-fno-sanitize*}" arm64/arm64/vfp.c standard arm64/arm64/vm_machdep.c standard arm64/coresight/coresight.c standard arm64/coresight/coresight_acpi.c optional acpi arm64/coresight/coresight_fdt.c optional fdt arm64/coresight/coresight_if.m standard arm64/coresight/coresight_cmd.c standard arm64/coresight/coresight_cpu_debug.c optional fdt arm64/coresight/coresight_etm4x.c standard arm64/coresight/coresight_etm4x_acpi.c optional acpi arm64/coresight/coresight_etm4x_fdt.c optional fdt arm64/coresight/coresight_funnel.c standard arm64/coresight/coresight_funnel_acpi.c optional acpi arm64/coresight/coresight_funnel_fdt.c optional fdt arm64/coresight/coresight_replicator.c standard arm64/coresight/coresight_replicator_acpi.c optional acpi arm64/coresight/coresight_replicator_fdt.c optional fdt arm64/coresight/coresight_tmc.c standard arm64/coresight/coresight_tmc_acpi.c optional acpi arm64/coresight/coresight_tmc_fdt.c optional fdt dev/smbios/smbios_subr.c standard arm64/iommu/iommu.c optional iommu arm64/iommu/iommu_if.m optional iommu arm64/iommu/iommu_pmap.c optional iommu arm64/iommu/smmu.c optional iommu arm64/iommu/smmu_acpi.c optional iommu acpi arm64/iommu/smmu_fdt.c optional iommu fdt arm64/iommu/smmu_quirks.c optional iommu dev/iommu/busdma_iommu.c optional iommu dev/iommu/iommu_gas.c optional iommu arm64/vmm/vmm.c optional vmm arm64/vmm/vmm_dev.c optional vmm 
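(Editor's note, not part of the patch.) The ptrauth.c rule above first strips any inherited -mbranch-protection option with the :N make modifier and then adds -mbranch-protection=bti back, presumably so that the file that manages the PAC keys is built with BTI landing pads but without return-address signing. Under BTI, any function that may be reached through an indirect branch must begin with a landing pad; a hedged C illustration of what that means for callers, function names invented for the example:

    /* With -mbranch-protection=bti the compiler starts functions that may be
       indirect-branch targets with a "bti c" landing pad; when BTI is
       enforced, an indirect call that lands on anything else traps. */
    typedef void (*handler_fn)(void);

    static void
    dispatch(handler_fn h)
    {
        h();    /* compiles to blr xN; the target needs a landing pad */
    }

The GNU_PROPERTY_AARCH64_FEATURE_1_NOTE lines added at the end of each assembly file in this patch serve the matching purpose for hand-written code: they emit the .note.gnu.property feature bits for the object, and since the linker ANDs this property across all input objects, every object must advertise it or the final image loses the protection.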
arm64/vmm/vmm_instruction_emul.c optional vmm arm64/vmm/vmm_stat.c optional vmm arm64/vmm/vmm_arm64.c optional vmm arm64/vmm/vmm_reset.c optional vmm arm64/vmm/vmm_call.S optional vmm arm64/vmm/vmm_hyp_exception.S optional vmm \ compile-with "${NORMAL_C:N-fsanitize*:N-fno-sanitize*:N-mbranch-protection*} -fpie" \ no-obj arm64/vmm/vmm_hyp.c optional vmm \ compile-with "${NORMAL_C:N-fsanitize*:N-fno-sanitize*:N-mbranch-protection*} -fpie" \ no-obj vmm_hyp_blob.elf.full optional vmm \ dependency "vmm_hyp.o vmm_hyp_exception.o" \ compile-with "${SYSTEM_LD_BASECMD} -o ${.TARGET} ${.ALLSRC} --defsym=_start='0x0' --defsym=text_start='0x0'" \ no-obj no-implicit-rule vmm_hyp_blob.elf optional vmm \ dependency "vmm_hyp_blob.elf.full" \ compile-with "${OBJCOPY} --strip-debug ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule vmm_hyp_blob.bin optional vmm \ dependency vmm_hyp_blob.elf \ compile-with "${OBJCOPY} --output-target=binary ${.ALLSRC} ${.TARGET}" \ no-obj no-implicit-rule arm64/vmm/vmm_hyp_el2.S optional vmm \ dependency vmm_hyp_blob.bin arm64/vmm/vmm_mmu.c optional vmm arm64/vmm/io/vgic.c optional vmm arm64/vmm/io/vgic_v3.c optional vmm arm64/vmm/io/vgic_if.m optional vmm arm64/vmm/io/vtimer.c optional vmm crypto/armv8/armv8_crypto.c optional armv8crypto armv8_crypto_wrap.o optional armv8crypto \ dependency "$S/crypto/armv8/armv8_crypto_wrap.c" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8 ${WERROR} ${NO_WCAST_QUAL} ${CFLAGS:M-march=*:S/^$/-march=armv8-a/}+crypto ${.IMPSRC}" \ no-implicit-rule \ clean "armv8_crypto_wrap.o" aesv8-armx.o optional armv8crypto | ossl \ dependency "$S/crypto/openssl/aarch64/aesv8-armx.S" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8 -I$S/crypto/openssl ${WERROR} ${NO_WCAST_QUAL} ${CFLAGS:M-march=*:S/^$/-march=armv8-a/}+crypto ${.IMPSRC}" \ no-implicit-rule \ clean "aesv8-armx.o" ghashv8-armx.o optional armv8crypto \ dependency "$S/crypto/openssl/aarch64/ghashv8-armx.S" \ compile-with "${CC} -c ${CFLAGS:C/^-O2$/-O3/:N-nostdinc:N-mgeneral-regs-only} -I$S/crypto/armv8 -I$S/crypto/openssl ${WERROR} ${NO_WCAST_QUAL} ${CFLAGS:M-march=*:S/^$/-march=armv8-a/}+crypto ${.IMPSRC}" \ no-implicit-rule \ clean "ghashv8-armx.o" crypto/des/des_enc.c optional netsmb crypto/openssl/ossl_aarch64.c optional ossl crypto/openssl/aarch64/chacha-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}" crypto/openssl/aarch64/poly1305-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}" crypto/openssl/aarch64/sha1-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}" crypto/openssl/aarch64/sha256-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}" crypto/openssl/aarch64/sha512-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}" crypto/openssl/aarch64/vpaes-armv8.S optional ossl \ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}" dev/acpica/acpi_bus_if.m optional acpi dev/acpica/acpi_if.m optional acpi dev/acpica/acpi_pci_link.c optional acpi pci dev/acpica/acpi_pcib.c optional acpi pci dev/acpica/acpi_pxm.c optional acpi dev/ahci/ahci_generic.c optional ahci cddl/dev/dtrace/aarch64/dtrace_asm.S optional dtrace 
compile-with "${DTRACE_S}" cddl/dev/dtrace/aarch64/dtrace_subr.c optional dtrace compile-with "${DTRACE_C}" cddl/dev/fbt/aarch64/fbt_isa.c optional dtrace_fbt | dtraceall compile-with "${FBT_C}" # zfs blake3 hash support contrib/openzfs/module/icp/asm-aarch64/blake3/b3_aarch64_sse2.S optional zfs compile-with "${ZFS_S:N-mgeneral-regs-only}" contrib/openzfs/module/icp/asm-aarch64/blake3/b3_aarch64_sse41.S optional zfs compile-with "${ZFS_S:N-mgeneral-regs-only}" # zfs sha2 hash support zfs-sha256-armv8.o optional zfs \ dependency "$S/contrib/openzfs/module/icp/asm-aarch64/sha2/sha256-armv8.S" \ compile-with "${CC} -c ${ZFS_ASM_CFLAGS:N-mgeneral-regs-only} -o ${.TARGET} ${WERROR} $S/contrib/openzfs/module/icp/asm-aarch64/sha2/sha256-armv8.S" \ no-implicit-rule \ clean "zfs-sha256-armv8.o" zfs-sha512-armv8.o optional zfs \ dependency "$S/contrib/openzfs/module/icp/asm-aarch64/sha2/sha512-armv8.S" \ compile-with "${CC} -c ${ZFS_ASM_CFLAGS:N-mgeneral-regs-only} -o ${.TARGET} ${WERROR} $S/contrib/openzfs/module/icp/asm-aarch64/sha2/sha512-armv8.S" \ no-implicit-rule \ clean "zfs-sha512-armv8.o" ## ## ASoC support ## dev/sound/fdt/audio_dai_if.m optional sound fdt dev/sound/fdt/audio_soc.c optional sound fdt dev/sound/fdt/dummy_codec.c optional sound fdt dev/sound/fdt/simple_amplifier.c optional sound fdt ## ## Device drivers ## dev/axgbe/if_axgbe.c optional axa fdt dev/axgbe/xgbe-desc.c optional axa fdt dev/axgbe/xgbe-dev.c optional axa fdt dev/axgbe/xgbe-drv.c optional axa fdt dev/axgbe/xgbe-mdio.c optional axa fdt dev/axgbe/xgbe-sysctl.c optional axa fdt dev/axgbe/xgbe-txrx.c optional axa fdt dev/axgbe/xgbe_osdep.c optional axa fdt dev/axgbe/xgbe-phy-v1.c optional axa fdt dev/cpufreq/cpufreq_dt.c optional cpufreq fdt dev/dpaa2/dpaa2_bp.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_buf.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_channel.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_cmd_if.m optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_con.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_console.c optional soc_nxp_ls dpaa2 fdt dev/dpaa2/dpaa2_io.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_mac.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_mc.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_mc_acpi.c optional soc_nxp_ls dpaa2 acpi dev/dpaa2/dpaa2_mc_fdt.c optional soc_nxp_ls dpaa2 fdt dev/dpaa2/dpaa2_mc_if.m optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_mcp.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_ni.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_rc.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_swp.c optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_swp_if.m optional soc_nxp_ls dpaa2 dev/dpaa2/dpaa2_types.c optional soc_nxp_ls dpaa2 dev/dpaa2/memac_mdio_acpi.c optional soc_nxp_ls dpaa2 acpi dev/dpaa2/memac_mdio_common.c optional soc_nxp_ls dpaa2 acpi | soc_nxp_ls dpaa2 fdt dev/dpaa2/memac_mdio_fdt.c optional soc_nxp_ls dpaa2 fdt dev/dpaa2/memac_mdio_if.m optional soc_nxp_ls dpaa2 acpi | soc_nxp_ls dpaa2 fdt # Synopsys DesignWare Ethernet Controller dev/dwc/if_dwc_rk.c optional fdt dwc_rk soc_rockchip_rk3328 | fdt dwc_rk soc_rockchip_rk3399 dev/dwc/if_dwc_socfpga.c optional fdt dwc_socfpga dev/enetc/enetc_mdio.c optional enetc soc_nxp_ls dev/enetc/if_enetc.c optional enetc iflib pci fdt soc_nxp_ls dev/eqos/if_eqos.c optional eqos dev/eqos/if_eqos_if.m optional eqos dev/eqos/if_eqos_fdt.c optional eqos fdt dev/etherswitch/felix/felix.c optional enetc etherswitch fdt felix pci soc_nxp_ls dev/firmware/arm/scmi.c optional fdt scmi dev/firmware/arm/scmi_clk.c optional fdt scmi dev/firmware/arm/scmi_if.m optional fdt scmi 
dev/firmware/arm/scmi_mailbox.c optional fdt scmi dev/firmware/arm/scmi_smc.c optional fdt scmi dev/firmware/arm/scmi_virtio.c optional fdt scmi virtio dev/firmware/arm/scmi_shmem.c optional fdt scmi dev/gpio/pl061.c optional pl061 gpio dev/gpio/pl061_acpi.c optional pl061 gpio acpi dev/gpio/pl061_fdt.c optional pl061 gpio fdt dev/gpio/qoriq_gpio.c optional soc_nxp_ls gpio fdt dev/hwpmc/hwpmc_arm64.c optional hwpmc dev/hwpmc/hwpmc_arm64_md.c optional hwpmc dev/hwpmc/hwpmc_cmn600.c optional hwpmc acpi arm64/arm64/cmn600.c optional hwpmc acpi dev/hwpmc/hwpmc_dmc620.c optional hwpmc acpi dev/hwpmc/pmu_dmc620.c optional hwpmc acpi # Microsoft Hyper-V dev/hyperv/vmbus/hyperv.c optional hyperv acpi dev/hyperv/vmbus/aarch64/hyperv_aarch64.c optional hyperv acpi dev/hyperv/vmbus/vmbus.c optional hyperv acpi pci dev/hyperv/vmbus/aarch64/vmbus_aarch64.c optional hyperv acpi dev/hyperv/vmbus/vmbus_if.m optional hyperv acpi dev/hyperv/vmbus/vmbus_res.c optional hyperv acpi dev/hyperv/vmbus/vmbus_xact.c optional hyperv acpi dev/hyperv/vmbus/aarch64/hyperv_machdep.c optional hyperv acpi dev/hyperv/vmbus/vmbus_chan.c optional hyperv acpi dev/hyperv/vmbus/hyperv_busdma.c optional hyperv acpi dev/hyperv/vmbus/vmbus_br.c optional hyperv acpi dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c optional hyperv acpi dev/hyperv/utilities/vmbus_timesync.c optional hyperv acpi dev/hyperv/utilities/vmbus_heartbeat.c optional hyperv acpi dev/hyperv/utilities/vmbus_ic.c optional hyperv acpi dev/hyperv/utilities/vmbus_shutdown.c optional hyperv acpi dev/hyperv/utilities/hv_kvp.c optional hyperv acpi dev/hyperv/input/hv_kbd.c optional hyperv acpi dev/hyperv/input/hv_kbdc.c optional hyperv acpi dev/hyperv/netvsc/hn_nvs.c optional hyperv acpi dev/hyperv/netvsc/hn_rndis.c optional hyperv acpi dev/hyperv/netvsc/if_hn.c optional hyperv acpi dev/hyperv/pcib/vmbus_pcib.c optional hyperv pci acpi dev/ice/if_ice_iflib.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_lib.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_osdep.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_resmgr.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_strings.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_iflib_recovery_txrx.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_iflib_txrx.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_common.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_controlq.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_dcb.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_flex_pipe.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_flow.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_nvm.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_sched.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_switch.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_vlan_mode.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_fw_logging.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_fwlog.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/ice_rdma.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" dev/ice/irdma_if.m optional ice pci \ compile-with "${NORMAL_M} -I$S/dev/ice" dev/ice/irdma_di_if.m optional ice pci \ compile-with 
"${NORMAL_M} -I$S/dev/ice" dev/ice/ice_ddp_common.c optional ice pci \ compile-with "${NORMAL_C} -I$S/dev/ice" ice_ddp.c optional ice_ddp \ compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01032400 -mice_ddp -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "ice_ddp.c" ice_ddp.fwo optional ice_ddp \ dependency "ice_ddp.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "ice_ddp.fwo" ice_ddp.fw optional ice_ddp \ dependency "$S/contrib/dev/ice/ice-1.3.36.0.pkg" \ compile-with "${CP} $S/contrib/dev/ice/ice-1.3.36.0.pkg ice_ddp.fw" \ no-obj no-implicit-rule \ clean "ice_ddp.fw" dev/iicbus/controller/twsi/mv_twsi.c optional twsi fdt dev/iicbus/controller/twsi/a10_twsi.c optional twsi fdt dev/iicbus/controller/twsi/twsi.c optional twsi fdt dev/iicbus/controller/rockchip/rk_i2c.c optional rk_i2c fdt dev/ipmi/ipmi.c optional ipmi dev/ipmi/ipmi_acpi.c optional ipmi acpi dev/ipmi/ipmi_bt.c optional ipmi dev/ipmi/ipmi_kcs.c optional ipmi dev/ipmi/ipmi_smic.c optional ipmi dev/mailbox/arm/arm_doorbell.c optional fdt arm_doorbell dev/mbox/mbox_if.m optional soc_brcm_bcm2837 dev/mmc/host/dwmmc_altera.c optional dwmmc dwmmc_altera fdt dev/mmc/host/dwmmc_hisi.c optional dwmmc dwmmc_hisi fdt dev/mmc/host/dwmmc_rockchip.c optional dwmmc rk_dwmmc fdt dev/neta/if_mvneta_fdt.c optional neta fdt dev/neta/if_mvneta.c optional neta mdio mii fdt dev/ofw/ofw_cpu.c optional fdt dev/ofw/ofw_pci.c optional fdt pci dev/ofw/ofw_pcib.c optional fdt pci dev/pci/controller/pci_n1sdp.c optional pci_n1sdp acpi dev/pci/pci_host_generic.c optional pci dev/pci/pci_host_generic_acpi.c optional pci acpi dev/pci/pci_host_generic_den0115.c optional pci acpi dev/pci/pci_host_generic_fdt.c optional pci fdt dev/pci/pci_dw_mv.c optional pci fdt dev/pci/pci_dw.c optional pci fdt dev/pci/pci_dw_if.m optional pci fdt dev/psci/psci.c standard dev/psci/smccc_arm64.S standard dev/psci/smccc.c standard dev/pwm/controller/allwinner/aw_pwm.c optional fdt aw_pwm dev/pwm/controller//rockchip/rk_pwm.c optional fdt rk_pwm dev/random/armv8rng.c optional armv8_rng !random_loadable dev/safexcel/safexcel.c optional safexcel fdt dev/sdhci/sdhci_xenon.c optional sdhci_xenon sdhci dev/sdhci/sdhci_xenon_acpi.c optional sdhci_xenon sdhci acpi dev/sdhci/sdhci_xenon_fdt.c optional sdhci_xenon sdhci fdt dev/sram/mmio_sram.c optional fdt mmio_sram dev/sram/mmio_sram_if.m optional fdt mmio_sram dev/spibus/controller/allwinner/aw_spi.c optional fdt aw_spi dev/spibus/controller/rockchip/rk_spi.c optional fdt rk_spi dev/uart/uart_cpu_arm64.c optional uart dev/uart/uart_dev_mu.c optional uart uart_mu fdt dev/uart/uart_dev_pl011.c optional uart pl011 dev/usb/controller/dwc_otg_hisi.c optional dwcotg fdt soc_hisi_hi6220 dev/usb/controller/ehci_mv.c optional ehci_mv fdt dev/usb/controller/generic_ehci.c optional ehci dev/usb/controller/generic_ehci_acpi.c optional ehci acpi dev/usb/controller/generic_ehci_fdt.c optional ehci fdt dev/usb/controller/generic_ohci.c optional ohci fdt dev/usb/controller/generic_usb_if.m optional ohci fdt dev/usb/controller/musb_otg_allwinner.c optional musb fdt soc_allwinner_a64 dev/usb/controller/usb_nop_xceiv.c optional fdt dev/usb/controller/generic_xhci.c optional xhci dev/usb/controller/generic_xhci_acpi.c optional xhci acpi dev/usb/controller/generic_xhci_fdt.c optional xhci fdt dev/usb/controller/dwc3/dwc3.c optional xhci acpi dwc3 | xhci fdt dwc3 dev/usb/controller/dwc3/aw_dwc3.c optional xhci fdt dwc3 aw_dwc3 dev/usb/controller/dwc3/rk_dwc3.c optional xhci fdt dwc3 rk_dwc3 
dev/vnic/mrml_bridge.c optional vnic fdt dev/vnic/nic_main.c optional vnic pci dev/vnic/nicvf_main.c optional vnic pci pci_iov dev/vnic/nicvf_queues.c optional vnic pci pci_iov dev/vnic/thunder_bgx_fdt.c optional soc_cavm_thunderx pci vnic fdt dev/vnic/thunder_bgx.c optional soc_cavm_thunderx pci vnic pci dev/vnic/thunder_mdio_fdt.c optional soc_cavm_thunderx pci vnic fdt dev/vnic/thunder_mdio.c optional soc_cavm_thunderx pci vnic dev/vnic/lmac_if.m optional inet | inet6 | vnic ## ## SoC Support ## # Allwinner common files arm/allwinner/a10_timer.c optional a10_timer fdt arm/allwinner/a10_codec.c optional sound a10_codec fdt arm/allwinner/a31_dmac.c optional a31_dmac fdt arm/allwinner/a33_codec.c optional fdt sound a33_codec arm/allwinner/a64/sun50i_a64_acodec.c optional fdt sound a64_codec arm/allwinner/sunxi_dma_if.m optional a31_dmac arm/allwinner/aw_cir.c optional evdev aw_cir fdt arm/allwinner/aw_gpio.c optional gpio aw_gpio fdt arm/allwinner/aw_i2s.c optional fdt sound aw_i2s arm/allwinner/aw_mmc.c optional mmc aw_mmc fdt | mmccam aw_mmc fdt arm/allwinner/aw_nmi.c optional aw_nmi fdt \ compile-with "${NORMAL_C} -I$S/contrib/device-tree/include" arm/allwinner/aw_r_intc.c optional aw_r_intc fdt arm/allwinner/aw_rsb.c optional aw_rsb fdt arm/allwinner/aw_rtc.c optional aw_rtc fdt arm/allwinner/aw_sid.c optional aw_sid nvmem fdt arm/allwinner/aw_syscon.c optional aw_syscon syscon fdt arm/allwinner/aw_thermal.c optional aw_thermal nvmem fdt arm/allwinner/aw_usbphy.c optional ehci aw_usbphy fdt arm/allwinner/aw_usb3phy.c optional xhci aw_usbphy fdt arm/allwinner/aw_wdog.c optional aw_wdog fdt arm/allwinner/axp81x.c optional axp81x fdt arm/allwinner/if_awg.c optional awg syscon aw_sid nvmem fdt # Allwinner clock driver dev/clk/allwinner/aw_ccung.c optional aw_ccu fdt dev/clk/allwinner/aw_clk_frac.c optional aw_ccu fdt dev/clk/allwinner/aw_clk_m.c optional aw_ccu fdt dev/clk/allwinner/aw_clk_mipi.c optional aw_ccu fdt dev/clk/allwinner/aw_clk_nkmp.c optional aw_ccu fdt dev/clk/allwinner/aw_clk_nm.c optional aw_ccu fdt dev/clk/allwinner/aw_clk_nmm.c optional aw_ccu fdt dev/clk/allwinner/aw_clk_np.c optional aw_ccu fdt dev/clk/allwinner/aw_clk_prediv_mux.c optional aw_ccu fdt dev/clk/allwinner/ccu_a64.c optional soc_allwinner_a64 aw_ccu fdt dev/clk/allwinner/ccu_h3.c optional soc_allwinner_h5 aw_ccu fdt dev/clk/allwinner/ccu_h6.c optional soc_allwinner_h6 aw_ccu fdt dev/clk/allwinner/ccu_h6_r.c optional soc_allwinner_h6 aw_ccu fdt dev/clk/allwinner/ccu_sun8i_r.c optional aw_ccu fdt dev/clk/allwinner/ccu_de2.c optional aw_ccu fdt # Allwinner padconf files arm/allwinner/a64/a64_padconf.c optional soc_allwinner_a64 fdt arm/allwinner/a64/a64_r_padconf.c optional soc_allwinner_a64 fdt arm/allwinner/h3/h3_padconf.c optional soc_allwinner_h5 fdt arm/allwinner/h3/h3_r_padconf.c optional soc_allwinner_h5 fdt arm/allwinner/h6/h6_padconf.c optional soc_allwinner_h6 fdt arm/allwinner/h6/h6_r_padconf.c optional soc_allwinner_h6 fdt # Altera/Intel arm64/intel/stratix10-soc-fpga-mgr.c optional soc_intel_stratix10 fdt arm64/intel/stratix10-svc.c optional soc_intel_stratix10 fdt # Annapurna arm/annapurna/alpine/alpine_ccu.c optional al_ccu fdt arm/annapurna/alpine/alpine_nb_service.c optional al_nb_service fdt arm/annapurna/alpine/alpine_pci.c optional al_pci fdt arm/annapurna/alpine/alpine_pci_msix.c optional al_pci fdt arm/annapurna/alpine/alpine_serdes.c optional al_serdes fdt \ no-depend \ compile-with "${CC} -c -o ${.TARGET} ${CFLAGS} -I$S/contrib/alpine-hal -I$S/contrib/alpine-hal/eth ${.IMPSRC}" # 
Broadcom arm64/broadcom/brcmmdio/mdio_mux_iproc.c optional soc_brcm_ns2 fdt arm64/broadcom/brcmmdio/mdio_nexus_iproc.c optional soc_brcm_ns2 fdt arm64/broadcom/brcmmdio/mdio_ns2_pcie_phy.c optional soc_brcm_ns2 fdt pci arm64/broadcom/genet/if_genet.c optional soc_brcm_bcm2838 fdt genet arm/broadcom/bcm2835/bcm2835_audio.c optional sound vchiq fdt \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" arm/broadcom/bcm2835/bcm2835_bsc.c optional bcm2835_bsc fdt arm/broadcom/bcm2835/bcm2835_clkman.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_cpufreq.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_dma.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_fbd.c optional vt soc_brcm_bcm2837 fdt | vt soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_firmware.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_ft5406.c optional evdev bcm2835_ft5406 fdt arm/broadcom/bcm2835/bcm2835_gpio.c optional gpio soc_brcm_bcm2837 fdt | gpio soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_intr.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_mbox.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_rng.c optional !random_loadable soc_brcm_bcm2837 fdt | !random_loadable soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_sdhci.c optional sdhci soc_brcm_bcm2837 fdt | sdhci soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_sdhost.c optional sdhci soc_brcm_bcm2837 fdt | sdhci soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_spi.c optional bcm2835_spi fdt arm/broadcom/bcm2835/bcm2835_vcbus.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_vcio.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2835_wdog.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm2836.c optional soc_brcm_bcm2837 fdt | soc_brcm_bcm2838 fdt arm/broadcom/bcm2835/bcm283x_dwc_fdt.c optional dwcotg fdt soc_brcm_bcm2837 | dwcotg fdt soc_brcm_bcm2838 arm/broadcom/bcm2835/bcm2838_pci.c optional soc_brcm_bcm2838 fdt pci arm/broadcom/bcm2835/bcm2838_xhci.c optional soc_brcm_bcm2838 fdt pci xhci arm/broadcom/bcm2835/raspberrypi_gpio.c optional soc_brcm_bcm2837 gpio fdt | soc_brcm_bcm2838 gpio fdt contrib/vchiq/interface/compat/vchi_bsd.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_2835_arm.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -Wno-unused -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_arm.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -Wno-unused -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_connected.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_core.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_kern_lib.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_kmod.c optional vchiq soc_brcm_bcm2837 \ compile-with 
"${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_shim.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" contrib/vchiq/interface/vchiq_arm/vchiq_util.c optional vchiq soc_brcm_bcm2837 \ compile-with "${NORMAL_C} -DUSE_VCHIQ_ARM -D__VCCOREVER__=0x04000000 -I$S/contrib/vchiq" # Cavium arm64/cavium/thunder_pcie_fdt.c optional soc_cavm_thunderx pci fdt arm64/cavium/thunder_pcie_pem.c optional soc_cavm_thunderx pci arm64/cavium/thunder_pcie_pem_fdt.c optional soc_cavm_thunderx pci fdt arm64/cavium/thunder_pcie_common.c optional soc_cavm_thunderx pci # i.MX8 Clock support arm64/freescale/imx/imx8mq_ccm.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_gate.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_mux.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_composite.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_sscg_pll.c optional fdt soc_freescale_imx8 arm64/freescale/imx/clk/imx_clk_frac_pll.c optional fdt soc_freescale_imx8 # iMX drivers arm/freescale/imx/imx_gpio.c optional gpio soc_freescale_imx8 fdt arm/freescale/imx/imx_i2c.c optional fsliic arm/freescale/imx/imx_machdep.c optional fdt soc_freescale_imx8 arm64/freescale/imx/imx7gpc.c optional fdt soc_freescale_imx8 dev/ffec/if_ffec.c optional ffec # Marvell arm/mv/a37x0_gpio.c optional a37x0_gpio gpio fdt arm/mv/a37x0_iic.c optional a37x0_iic iicbus fdt arm/mv/a37x0_spi.c optional a37x0_spi spibus fdt arm/mv/clk/a37x0_tbg.c optional a37x0_tbg clk fdt syscon arm/mv/clk/a37x0_xtal.c optional a37x0_xtal clk fdt syscon arm/mv/armada38x/armada38x_rtc.c optional mv_rtc fdt arm/mv/gpio.c optional mv_gpio fdt arm/mv/mvebu_gpio.c optional mv_gpio fdt arm/mv/mvebu_pinctrl.c optional mvebu_pinctrl fdt arm/mv/mv_ap806_clock.c optional soc_marvell_8k fdt arm/mv/mv_ap806_gicp.c optional mv_ap806_gicp fdt arm/mv/mv_ap806_sei.c optional mv_ap806_sei fdt arm/mv/mv_cp110_clock.c optional soc_marvell_8k fdt arm/mv/mv_cp110_icu.c optional mv_cp110_icu fdt arm/mv/mv_cp110_icu_bus.c optional mv_cp110_icu fdt arm/mv/mv_thermal.c optional soc_marvell_8k mv_thermal fdt arm/mv/clk/a37x0_tbg_pll.c optional a37x0_tbg clk fdt syscon arm/mv/clk/a37x0_periph_clk_driver.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon arm/mv/clk/a37x0_nb_periph_clk_driver.c optional a37x0_nb_periph clk fdt syscon arm/mv/clk/a37x0_sb_periph_clk_driver.c optional a37x0_sb_periph clk fdt syscon arm/mv/clk/periph.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon arm/mv/clk/periph_clk_d.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon arm/mv/clk/periph_clk_fixed.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon arm/mv/clk/periph_clk_gate.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon arm/mv/clk/periph_clk_mux_gate.c optional a37x0_nb_periph a37x0_sb_periph clk fdt syscon # NVidia arm/nvidia/tegra_abpmisc.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_ahci.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_efuse.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_ehci.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_gpio.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_i2c.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_lic.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_mc.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_pcie.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_sdhci.c optional fdt soc_nvidia_tegra210 
arm/nvidia/tegra_soctherm_if.m optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_soctherm.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_uart.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_usbphy.c optional fdt soc_nvidia_tegra210 arm/nvidia/tegra_xhci.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/max77620.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/max77620_gpio.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/max77620_regulators.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/max77620_rtc.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_car.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_clk_per.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_clk_pll.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_clk_super.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_coretemp.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_cpufreq.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_pinmux.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_pmc.c optional fdt soc_nvidia_tegra210 arm64/nvidia/tegra210/tegra210_xusbpadctl.c optional fdt soc_nvidia_tegra210 # Nvidia firmware for Tegra tegra210_xusb_fw.c optional tegra210_xusb_fw \ dependency "$S/conf/files.arm64" \ compile-with "${AWK} -f $S/tools/fw_stub.awk tegra210_xusb.fw:tegra210_xusb_fw -mtegra210_xusb_fw -c${.TARGET}" \ no-ctfconvert no-implicit-rule before-depend local \ clean "tegra210_xusb_fw.c" tegra210_xusb.fwo optional tegra210_xusb_fw \ dependency "tegra210_xusb.fw" \ compile-with "${NORMAL_FWO}" \ no-implicit-rule \ clean "tegra210_xusb.fwo" tegra210_xusb.fw optional tegra210_xusb_fw \ dependency "$S/contrib/dev/nvidia/tegra210_xusb.bin.uu" \ compile-with "${NORMAL_FW}" \ no-obj no-implicit-rule \ clean "tegra210_xusb.fw" # NXP dev/iicbus/controller/vybrid/vf_i2c.c optional vf_i2c iicbus soc_nxp_ls dev/iicbus/controller/vybrid/vf_i2c_acpi.c optional vf_i2c iicbus acpi soc_nxp_ls dev/iicbus/controller/vybrid/vf_i2c_fdt.c optional vf_i2c iicbus fdt soc_nxp_ls arm64/qoriq/qoriq_dw_pci.c optional pci fdt soc_nxp_ls arm64/qoriq/qoriq_gpio_pic.c optional gpio fdt soc_nxp_ls arm64/qoriq/qoriq_therm.c optional pci fdt soc_nxp_ls arm64/qoriq/qoriq_therm_if.m optional pci fdt soc_nxp_ls arm64/qoriq/clk/ls1028a_clkgen.c optional clk soc_nxp_ls fdt arm64/qoriq/clk/ls1028a_flexspi_clk.c optional clk soc_nxp_ls fdt arm64/qoriq/clk/ls1046a_clkgen.c optional clk soc_nxp_ls fdt arm64/qoriq/clk/ls1088a_clkgen.c optional clk soc_nxp_ls fdt arm64/qoriq/clk/lx2160a_clkgen.c optional clk soc_nxp_ls fdt arm64/qoriq/clk/qoriq_clk_pll.c optional clk soc_nxp_ls arm64/qoriq/clk/qoriq_clkgen.c optional clk soc_nxp_ls fdt dev/ahci/ahci_fsl_fdt.c optional soc_nxp_ls ahci fdt dev/flash/flexspi/flex_spi.c optional clk flex_spi soc_nxp_ls fdt # Qualcomm arm64/qualcomm/qcom_gcc.c optional qcom_gcc fdt dev/qcom_mdio/qcom_mdio_ipq4018.c optional qcom_mdio fdt mdio mii # RockChip Drivers arm64/rockchip/rk3328_codec.c optional fdt rk3328codec soc_rockchip_rk3328 arm64/rockchip/rk3399_emmcphy.c optional fdt rk_emmcphy soc_rockchip_rk3399 arm64/rockchip/rk3568_combphy.c optional fdt rk_combphy soc_rockchip_rk3568 arm64/rockchip/rk3568_pcie.c optional fdt pci soc_rockchip_rk3568 arm64/rockchip/rk3568_pciephy.c optional fdt pci soc_rockchip_rk3568 arm64/rockchip/rk_i2s.c optional fdt sound soc_rockchip_rk3328 | fdt sound soc_rockchip_rk3399 arm64/rockchip/rk_otp.c optional fdt soc_rockchip_rk3568 
arm64/rockchip/rk_otp_if.m optional fdt soc_rockchip_rk3568 dev/iicbus/pmic/rockchip/rk8xx.c optional fdt rk805 soc_rockchip_rk3328 | fdt rk805 soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 dev/iicbus/pmic/rockchip/rk8xx_clocks.c optional fdt rk805 soc_rockchip_rk3328 | fdt rk805 soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 dev/iicbus/pmic/rockchip/rk8xx_regulators.c optional fdt rk805 soc_rockchip_rk3328 | fdt rk805 soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 dev/iicbus/pmic/rockchip/rk8xx_rtc.c optional fdt rk805 soc_rockchip_rk3328 | fdt rk805 soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 dev/iicbus/pmic/rockchip/rk805.c optional fdt rk805 soc_rockchip_rk3328 dev/iicbus/pmic/rockchip/rk808.c optional fdt rk805 soc_rockchip_rk3399 dev/iicbus/pmic/rockchip/rk817.c optional fdt rk817 soc_rockchip_rk3568 arm64/rockchip/rk_grf.c optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 arm64/rockchip/rk_pinctrl.c optional fdt rk_pinctrl soc_rockchip_rk3328 | fdt rk_pinctrl soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 arm64/rockchip/rk_gpio.c optional fdt rk_gpio soc_rockchip_rk3328 | fdt rk_gpio soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 arm64/rockchip/rk_iodomain.c optional fdt rk_iodomain arm64/rockchip/rk_usb2phy.c optional fdt rk_usb2phy soc_rockchip_rk3328 | fdt rk_usb2phy soc_rockchip_rk3399 | fdt rk_usb2phy soc_rockchip_rk3568 arm64/rockchip/rk_typec_phy.c optional fdt rk_typec_phy soc_rockchip_rk3399 arm64/rockchip/rk_tsadc_if.m optional fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 arm64/rockchip/rk_tsadc.c optional fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 arm64/rockchip/rk_pcie.c optional fdt pci soc_rockchip_rk3399 arm64/rockchip/rk_pcie_phy.c optional fdt pci soc_rockchip_rk3399 # RockChip Clock support dev/clk/rockchip/rk_cru.c optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 dev/clk/rockchip/rk_clk_armclk.c optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 dev/clk/rockchip/rk_clk_composite.c optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 dev/clk/rockchip/rk_clk_fract.c optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 dev/clk/rockchip/rk_clk_gate.c optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 dev/clk/rockchip/rk_clk_mux.c optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 dev/clk/rockchip/rk_clk_pll.c optional fdt soc_rockchip_rk3328 | fdt soc_rockchip_rk3399 | fdt soc_rockchip_rk3568 dev/clk/rockchip/rk3328_cru.c optional fdt soc_rockchip_rk3328 dev/clk/rockchip/rk3399_cru.c optional fdt soc_rockchip_rk3399 dev/clk/rockchip/rk3399_pmucru.c optional fdt soc_rockchip_rk3399 dev/clk/rockchip/rk3568_cru.c optional fdt soc_rockchip_rk3568 dev/clk/rockchip/rk3568_pmucru.c optional fdt soc_rockchip_rk3568 # Xilinx arm/xilinx/uart_dev_cdnc.c optional uart soc_xilinx_zynq fdt arm/xilinx/zy7_gpio.c optional gpio soc_xilinx_zynq fdt dev/iicbus/controller/cadence/cdnc_i2c.c optional cdnc_i2c iicbus soc_xilinx_zynq fdt dev/usb/controller/xlnx_dwc3.c optional xhci soc_xilinx_zynq fdt dev/firmware/xilinx/zynqmp_firmware.c optional fdt soc_xilinx_zynq dev/firmware/xilinx/zynqmp_firmware_if.m optional fdt soc_xilinx_zynq dev/clk/xilinx/zynqmp_clock.c optional fdt soc_xilinx_zynq dev/clk/xilinx/zynqmp_clk_div.c optional fdt soc_xilinx_zynq dev/clk/xilinx/zynqmp_clk_fixed.c optional fdt soc_xilinx_zynq dev/clk/xilinx/zynqmp_clk_gate.c 
	optional fdt soc_xilinx_zynq
dev/clk/xilinx/zynqmp_clk_mux.c		optional fdt soc_xilinx_zynq
dev/clk/xilinx/zynqmp_clk_pll.c		optional fdt soc_xilinx_zynq
dev/clk/xilinx/zynqmp_reset.c		optional fdt soc_xilinx_zynq
diff --git a/sys/dev/psci/smccc_arm64.S b/sys/dev/psci/smccc_arm64.S
index 25a64669fab3..2a3c09ec26b2 100644
--- a/sys/dev/psci/smccc_arm64.S
+++ b/sys/dev/psci/smccc_arm64.S
@@ -1,86 +1,91 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Ruslan Bukin
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
+#include
+
#include
+
.macro arm_smccc_1_0 insn
ENTRY(arm_smccc_\insn)
	\insn	#0
	ldr	x4, [sp]
	cbz	x4, 1f
	stp	x0, x1, [x4, #16 * 0]
	stp	x2, x3, [x4, #16 * 1]
1:	ret
END(arm_smccc_\insn)
.endm
/*
 * int arm_smccc_*(register_t, register_t, register_t, register_t,
 *	register_t, register_t, register_t, register_t,
 *	struct arm_smccc_res *res)
 */
arm_smccc_1_0 hvc
arm_smccc_1_0 smc
.macro arm_smccc_1_2 insn
ENTRY(arm_smccc_1_2_\insn)
	stp	x1, x19, [sp, #-16]!
	mov	x19, x0
	ldp	x0, x1, [x19, #16 * 0]
	ldp	x2, x3, [x19, #16 * 1]
	ldp	x4, x5, [x19, #16 * 2]
	ldp	x6, x7, [x19, #16 * 3]
	ldp	x8, x9, [x19, #16 * 4]
	ldp	x10, x11, [x19, #16 * 5]
	ldp	x12, x13, [x19, #16 * 6]
	ldp	x14, x15, [x19, #16 * 7]
	ldp	x16, x17, [x19, #16 * 8]
	\insn	#0
	ldr	x19, [sp]
	cbz	x19, 1f
	stp	x0, x1, [x19, #16 * 0]
	stp	x2, x3, [x19, #16 * 1]
	stp	x4, x5, [x19, #16 * 2]
	stp	x6, x7, [x19, #16 * 3]
	stp	x8, x9, [x19, #16 * 4]
	stp	x10, x11, [x19, #16 * 5]
	stp	x12, x13, [x19, #16 * 6]
	stp	x14, x15, [x19, #16 * 7]
	stp	x16, x17, [x19, #16 * 8]
1:	ldp	xzr, x19, [sp], #16
	ret
END(arm_smccc_1_2_\insn)
.endm
/* int arm_smccc_1_2_*(const struct arm_smccc_1_2_regs *args,
 *	struct arm_smccc_1_2_regs *res)
 */
arm_smccc_1_2 hvc
arm_smccc_1_2 smc
+
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/kern/firmw.S b/sys/kern/firmw.S
index f0377c4e0584..cd808d4a9396 100644
--- a/sys/kern/firmw.S
+++ b/sys/kern/firmw.S
@@ -1,47 +1,53 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2020 John Baldwin
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory (Department of Computer Science and
 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
 * DARPA SSITH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include
#define	FIRMW_START(S)	__CONCAT(_binary_, __CONCAT(S, _start))
#define	FIRMW_END(S)	__CONCAT(_binary_, __CONCAT(S, _end))
	.section rodata, "a", %progbits
	.globl	FIRMW_START(FIRMW_SYMBOL)
	.type	FIRMW_START(FIRMW_SYMBOL), %object
FIRMW_START(FIRMW_SYMBOL):
	.incbin	FIRMW_FILE
	.size	FIRMW_START(FIRMW_SYMBOL), . - FIRMW_START(FIRMW_SYMBOL)
	.globl	FIRMW_END(FIRMW_SYMBOL)
	.type	FIRMW_END(FIRMW_SYMBOL), %object
FIRMW_END(FIRMW_SYMBOL):
	.size	FIRMW_END(FIRMW_SYMBOL), . - FIRMW_END(FIRMW_SYMBOL)
+
+#if defined(__aarch64__)
+#include
+#include
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
+#endif
diff --git a/sys/modules/vmm/Makefile b/sys/modules/vmm/Makefile
index a98f0f27e25b..6737d868f2ea 100644
--- a/sys/modules/vmm/Makefile
+++ b/sys/modules/vmm/Makefile
@@ -1,145 +1,145 @@
.include

KMOD=	vmm

SRCS=	opt_acpi.h opt_bhyve_snapshot.h opt_ddb.h \
	acpi_if.h bus_if.h device_if.h pci_if.h pcib_if.h vnode_if.h

CFLAGS+= -DVMM_KEEP_STATS
CFLAGS+= -I${SRCTOP}/sys/${MACHINE}/vmm
CFLAGS+= -I${SRCTOP}/sys/${MACHINE}/vmm/io

# generic vmm support
.PATH: ${SRCTOP}/sys/${MACHINE}/vmm
SRCS+=	vmm.c \
	vmm_dev.c \
	vmm_instruction_emul.c \
	vmm_stat.c

.if ${MACHINE_CPUARCH} == "aarch64"
DPSRCS+=	assym.inc

# TODO: Add the new EL2 code
SRCS+=	vmm_arm64.c \
	vmm_reset.c \
	vmm_call.S \
	vmm_mmu.c \
	vmm_hyp_el2.S

.PATH: ${SRCTOP}/sys/${MACHINE}/vmm/io
SRCS+=	vgic.c \
	vgic_if.h \
	vgic_if.c \
	vgic_v3.c \
	vtimer.c

SRCS+=	vmm_hyp_exception.S vmm_hyp.c

CLEANFILES+=	vmm_hyp_blob.elf.full
CLEANFILES+=	vmm_hyp_blob.elf vmm_hyp_blob.bin

vmm_hyp_exception.o: vmm_hyp_exception.S
	${CC} -c -x assembler-with-cpp -DLOCORE \
	    ${CFLAGS:N-fsanitize*:N-fno-sanitize*:N-mbranch-protection*} \
	    ${.IMPSRC} -o ${.TARGET} -fpie

vmm_hyp.o: vmm_hyp.c
	${CC} -c ${CFLAGS:N-fsanitize*:N-fno-sanitize*:N-mbranch-protection*} \
	    ${.IMPSRC} -o ${.TARGET} -fpie

vmm_hyp_blob.elf.full: vmm_hyp_exception.o vmm_hyp.o
	${LD} -m ${LD_EMULATION} -Bdynamic -T ${SYSDIR}/conf/ldscript.arm64 \
-	    ${_LDFLAGS} --no-warn-mismatch --warn-common --export-dynamic \
+	    ${_LDFLAGS:N-zbti-report*} --no-warn-mismatch --warn-common --export-dynamic \
	    --dynamic-linker /red/herring -X -o ${.TARGET} ${.ALLSRC} \
	    --defsym=_start='0x0' --defsym=text_start='0x0'

vmm_hyp_blob.elf: vmm_hyp_blob.elf.full
	${OBJCOPY} --strip-debug ${.ALLSRC} ${.TARGET}

vmm_hyp_blob.bin: vmm_hyp_blob.elf
	${OBJCOPY} --output-target=binary ${.ALLSRC} ${.TARGET}

vmm_hyp_el2.o: vmm_hyp_blob.bin

.elif ${MACHINE_CPUARCH} == "amd64"
DPSRCS+=	vmx_assym.h svm_assym.h
DPSRCS+=	vmx_genassym.c svm_genassym.c offset.inc

CFLAGS+= -I${SRCTOP}/sys/amd64/vmm/intel
CFLAGS+= -I${SRCTOP}/sys/amd64/vmm/amd

SRCS+=	vmm_host.c \
	vmm_ioport.c \
	vmm_lapic.c \
	vmm_mem.c \
	vmm_util.c \
	x86.c

.PATH: ${SRCTOP}/sys/${MACHINE}/vmm/io
SRCS+=	iommu.c \
	ppt.c \
	vatpic.c \
	vatpit.c \
	vhpet.c \
	vioapic.c \
	vlapic.c \
	vpmtmr.c \
	vrtc.c

# intel-specific files
.PATH: ${SRCTOP}/sys/amd64/vmm/intel
SRCS+=	ept.c \
	vmcs.c \
	vmx_msr.c \
	vmx_support.S \
	vmx.c \
	vtd.c

# amd-specific files
.PATH: ${SRCTOP}/sys/amd64/vmm/amd
SRCS+=	vmcb.c \
	amdiommu.c \
	ivhd_if.c \
	ivhd_if.h \
	svm.c \
	svm_support.S \
	npt.c \
	ivrs_drv.c \
	amdvi_hw.c \
	svm_msr.c

SRCS.BHYVE_SNAPSHOT=	vmm_snapshot.c

CLEANFILES+=	vmx_assym.h vmx_genassym.o svm_assym.h svm_genassym.o

OBJS_DEPEND_GUESS.vmx_support.o+= vmx_assym.h
OBJS_DEPEND_GUESS.svm_support.o+= svm_assym.h
.endif

vmx_assym.h: vmx_genassym.o
	sh ${SYSDIR}/kern/genassym.sh vmx_genassym.o > ${.TARGET}

svm_assym.h: svm_genassym.o
	sh ${SYSDIR}/kern/genassym.sh svm_genassym.o > ${.TARGET}

vmx_support.o:
	${CC} -c -x assembler-with-cpp -DLOCORE ${CFLAGS} \
	    ${.IMPSRC} -o ${.TARGET}

svm_support.o:
	${CC} -c -x assembler-with-cpp -DLOCORE ${CFLAGS} \
	    ${.IMPSRC} -o ${.TARGET}

hyp_genassym.o: offset.inc
	${CC} -c ${CFLAGS:N-flto:N-fno-common} -fcommon ${.IMPSRC}

vmx_genassym.o: offset.inc
	${CC} -c ${CFLAGS:N-flto*:N-fno-common:N-fsanitize*:N-fno-sanitize*} \
	    -fcommon ${.IMPSRC}

svm_genassym.o: offset.inc
	${CC} -c ${CFLAGS:N-flto*:N-fno-common:N-fsanitize*:N-fno-sanitize*} \
	    -fcommon ${.IMPSRC}

.include