diff --git a/sys/arm64/arm64/copyinout.S b/sys/arm64/arm64/copyinout.S
index 26dd0b4cf14f..e41c4b5f6734 100644
--- a/sys/arm64/arm64/copyinout.S
+++ b/sys/arm64/arm64/copyinout.S
@@ -1,225 +1,239 @@
/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 *
 * This software was developed by Andrew Turner under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asm.h>

#include <sys/elf_common.h>
#include <sys/errno.h>

#include <machine/param.h>
#include <machine/vmparam.h>

#include "assym.inc"

.macro check_user_access user_arg, size_arg, bad_access_func
-	adds	x6, x\user_arg, x\size_arg
+	/*
+	 * TBI is enabled from FreeBSD 15.0. Clear the top byte of the
+	 * userspace address before checking whether it's within the given
+	 * limit. The later load/store instructions will fault if TBI is
+	 * disabled for the current process.
+	 */
+	and	x6, x\user_arg, #(~TBI_ADDR_MASK)
+	adds	x6, x6, x\size_arg
	b.cs	\bad_access_func
	ldr	x7, =VM_MAXUSER_ADDRESS
	cmp	x6, x7
	b.hi	\bad_access_func
.endm

/*
 * Fault handler for the copy{in,out} functions below.
 */
ENTRY(copyio_fault)
	SET_FAULT_HANDLER(xzr, x1)	/* Clear the handler */
	EXIT_USER_ACCESS_CHECK(w0, x1)
copyio_fault_nopcb:
	mov	x0, #EFAULT
	ret
END(copyio_fault)

/*
 * Copies from a kernel to user address
 *
 * int copyout(const void *kaddr, void *udaddr, size_t len)
 */
ENTRY(copyout)
	cbz	x2, 1f
	check_user_access 1, 2, copyio_fault_nopcb
	b	copycommon
1:	mov	x0, xzr			/* return 0 */
	ret
END(copyout)

/*
 * Copies from a user to kernel address
 *
 * int copyin(const void *uaddr, void *kdaddr, size_t len)
 */
ENTRY(copyin)
	cbz	x2, 1f
	check_user_access 0, 2, copyio_fault_nopcb
	b	copycommon
1:	mov	x0, xzr			/* return 0 */
	ret
END(copyin)

/*
 * Copies a string from a user to kernel address
 *
 * int copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *done)
 */
ENTRY(copyinstr)
	mov	x5, xzr			/* count = 0 */
	mov	w4, #1			/* If zero return failure */
	cbz	x2, 3f			/* If len == 0 then skip loop */

	adr	x6, copyio_fault	/* Get the handler address */
	SET_FAULT_HANDLER(x6, x7)	/* Set the handler */

+	/*
+	 * As in check_user_access, mask off the TBI bits for the cmp
+	 * instruction. The load will fault if TBI is disabled, but we
+	 * still need to check that the address didn't wrap.
+	 */
+	and	x6, x0, #(~TBI_ADDR_MASK)
	ldr	x7, =VM_MAXUSER_ADDRESS
-1:	cmp	x0, x7
+1:	cmp	x6, x7
	b.cs	copyio_fault
	ldtrb	w4, [x0]		/* Load from uaddr */
	add	x0, x0, #1		/* Next char */
	strb	w4, [x1], #1		/* Store in kaddr */
	add	x5, x5, #1		/* count++ */
+	add	x6, x6, #1		/* Increment masked address */
	cbz	w4, 2f			/* Break when NUL-terminated */
	sub	x2, x2, #1		/* len-- */
	cbnz	x2, 1b
2:	SET_FAULT_HANDLER(xzr, x7)	/* Clear the handler */

3:	cbz	x3, 4f			/* Check if done != NULL */
	str	x5, [x3]		/* done = count */

4:	mov	w1, #ENAMETOOLONG	/* Load ENAMETOOLONG to return if failed */
	cmp	w4, #0			/* Check if we saved the NUL-terminator */
	csel	w0, wzr, w1, eq		/* If so return success, else failure */
	ret
END(copyinstr)
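The masked range check above can be read as the C sketch below. It is illustrative only, assuming the vmparam.h constants; `user_range_ok` and the use of `__builtin_add_overflow` are choices for the example, not kernel API:

```c
/*
 * Illustrative C model of check_user_access: strip the TBI byte, then
 * ensure [masked, masked + len] neither wraps nor leaves the user region.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define TBI_ADDR_MASK		0xff00000000000000UL
#define VM_MAXUSER_ADDRESS	0x0001000000000000UL

static bool
user_range_ok(uint64_t uaddr, size_t len)
{
	uint64_t end, masked;

	masked = uaddr & ~TBI_ADDR_MASK;	/* and x6, xN, #(~TBI_ADDR_MASK) */
	if (__builtin_add_overflow(masked, len, &end))
		return (false);			/* adds ...; b.cs bad */
	return (end <= VM_MAXUSER_ADDRESS);	/* cmp ...; b.hi bad */
}
```

Note that the access itself still uses the original, possibly tagged pointer, so a process with TBI disabled faults on the load or store rather than slipping past the check.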
/*
 * Local helper
 *
 * x0 - src pointer
 * x1 - dst pointer
 * x2 - size
 * lr - the return address, so jump here instead of calling
 *
 * This function is optimized to minimize concurrent memory accesses. In
 * present form it is suited for cores with a single memory prefetching
 * unit.
 * ARM64TODO:
 *   Consider using separate functions for each ARM64 core. Adding memory
 *   access interleaving might increase the total throughput on A57 or A72.
 */
	.text
	.align	4
	.local	copycommon
	.type	copycommon,@function

copycommon:
	adr	x6, copyio_fault	/* Get the handler address */
	SET_FAULT_HANDLER(x6, x7)	/* Set the handler */
	ENTER_USER_ACCESS(w6, x7)

	/* Check alignment */
	orr	x3, x0, x1
	ands	x3, x3, 0x07
	b.eq	aligned

	/* Unaligned is byte by byte copy */
byte_by_byte:
	ldrb	w3, [x0], #0x01
	strb	w3, [x1], #0x01
	subs	x2, x2, #0x01
	b.ne	byte_by_byte
	b	ending

aligned:
	cmp	x2, #0x10
	b.lt	lead_out
	cmp	x2, #0x40
	b.lt	by_dwords_start

	/* Block copy */
	lsr	x15, x2, #0x06
by_blocks:
	ldp	x3, x4, [x0], #0x10
	ldp	x5, x6, [x0], #0x10
	ldp	x7, x8, [x0], #0x10
	ldp	x9, x10, [x0], #0x10
	stp	x3, x4, [x1], #0x10
	stp	x5, x6, [x1], #0x10
	stp	x7, x8, [x1], #0x10
	stp	x9, x10, [x1], #0x10
	subs	x15, x15, #0x01
	b.ne	by_blocks

	and	x2, x2, #0x3f

by_dwords_start:
	lsr	x15, x2, #0x04
	cbz	x15, lead_out
by_dwords:
	ldp	x3, x4, [x0], #0x10
	stp	x3, x4, [x1], #0x10
	subs	x15, x15, #0x01
	b.ne	by_dwords

	/* Less than 16 bytes to copy */
lead_out:
	tbz	x2, #0x03, last_word
	ldr	x3, [x0], #0x08
	str	x3, [x1], #0x08

last_word:
	tbz	x2, #0x02, last_hword
	ldr	w3, [x0], #0x04
	str	w3, [x1], #0x04

last_hword:
	tbz	x2, #0x01, last_byte
	ldrh	w3, [x0], #0x02
	strh	w3, [x1], #0x02

last_byte:
	tbz	x2, #0x00, ending
	ldrb	w3, [x0]
	strb	w3, [x1]

ending:
	EXIT_USER_ACCESS_CHECK(w6, x7)
	SET_FAULT_HANDLER(xzr, x7)	/* Clear the handler */

	mov	x0, xzr			/* return 0 */
	ret
	.size copycommon, . - copycommon

GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
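For context, a typical consumer of these routines looks like the sketch below; `struct my_args` and `my_handler` are hypothetical, but the copyin()/copyout() signatures match the comments above:

```c
/* Hypothetical ioctl-style handler showing the usual copyin/copyout flow. */
#include <sys/types.h>
#include <sys/systm.h>

struct my_args {
	int	request;
	int	flags;
};

static int
my_handler(void *uptr)
{
	struct my_args a;
	int error;

	/* copyin() returns 0 or EFAULT; a bad pointer never panics. */
	error = copyin(uptr, &a, sizeof(a));
	if (error != 0)
		return (error);
	a.flags |= 1;
	return (copyout(&a, uptr, sizeof(a)));
}
```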
diff --git a/sys/arm64/arm64/support.S b/sys/arm64/arm64/support.S
index 2d067c7f7730..bf6fc931e4b0 100644
--- a/sys/arm64/arm64/support.S
+++ b/sys/arm64/arm64/support.S
@@ -1,393 +1,400 @@
/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014-2015 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Andrew Turner
 * under sponsorship from the FreeBSD Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/asm.h>

#include <sys/elf_common.h>

#include <machine/param.h>
#include <machine/setjmp.h>
#include <machine/vmparam.h>

#include "assym.inc"

.macro check_user_access user_arg, limit, bad_addr_func
+	/*
+	 * TBI is enabled from FreeBSD 15.0. Clear the top byte of the
+	 * userspace address before checking whether it's within the given
+	 * limit. The later load/store instructions will fault if TBI is
+	 * disabled for the current process.
+	 */
+	and	x6, x\user_arg, #(~TBI_ADDR_MASK)
	ldr	x7, =(\limit)
-	cmp	x\user_arg, x7
+	cmp	x6, x7
	b.cs	\bad_addr_func
.endm

/*
 * One of the fu* or su* functions failed, return -1.
 */
ENTRY(fsu_fault)
	SET_FAULT_HANDLER(xzr, x1)	/* Reset the handler function */
	EXIT_USER_ACCESS_CHECK(w0, x1)
fsu_fault_nopcb:
	mov	x0, #-1
	ret
END(fsu_fault)

/*
 * int swapueword8_llsc(volatile uint8_t *, uint8_t *)
 */
ENTRY(swapueword8_llsc)
	check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb
	adr	x6, fsu_fault		/* Load the fault handler */
	SET_FAULT_HANDLER(x6, x4)	/* And set it */
	ENTER_USER_ACCESS(w6, x4)

	ldrb	w7, [x1]
	ldxrb	w2, [x0]
	stxrb	w3, w7, [x0]
	cbnz	w3, 1f
	strb	w2, [x1]		/* Stash old value in *val */
1:	EXIT_USER_ACCESS(w6)
	SET_FAULT_HANDLER(xzr, x6)
	mov	w0, w3
	ret
END(swapueword8_llsc)

/*
 * int swapueword8_lse(volatile uint8_t *, uint8_t *)
 */
ENTRY(swapueword8_lse)
	check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb
	adr	x6, fsu_fault		/* Load the fault handler */
	SET_FAULT_HANDLER(x6, x4)	/* And set it */
	ENTER_USER_ACCESS(w6, x4)

	ldrb	w7, [x1]
	.arch_extension lse
	swpb	w7, w2, [x0]
	.arch_extension nolse
	strb	w2, [x1]		/* Stash old value in *val */
	EXIT_USER_ACCESS(w6)
	SET_FAULT_HANDLER(xzr, x6)
	mov	w0, #0
	ret
END(swapueword8_lse)

/*
 * int swapueword32_llsc(volatile uint32_t *, uint32_t *)
 */
ENTRY(swapueword32_llsc)
	check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb
	adr	x6, fsu_fault		/* Load the fault handler */
	SET_FAULT_HANDLER(x6, x4)	/* And set it */
	ENTER_USER_ACCESS(w6, x4)

	ldr	w7, [x1]
	ldxr	w2, [x0]		/* Stash the old value in w2 */
	stxr	w3, w7, [x0]		/* Store new value */
	cbnz	w3, 1f
	str	w2, [x1]		/* Stash old value in *val */
1:	EXIT_USER_ACCESS(w6)
	SET_FAULT_HANDLER(xzr, x6)
	mov	w0, w3
	ret
END(swapueword32_llsc)

/*
 * int swapueword32_lse(volatile uint32_t *, uint32_t *)
 */
ENTRY(swapueword32_lse)
	check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb
	adr	x6, fsu_fault		/* Load the fault handler */
	SET_FAULT_HANDLER(x6, x4)	/* And set it */
	ENTER_USER_ACCESS(w6, x4)

	ldr	w7, [x1]
	.arch_extension lse
	swp	w7, w2, [x0]
	.arch_extension nolse
	str	w2, [x1]		/* Stash old value in *val */
	EXIT_USER_ACCESS(w6)
	SET_FAULT_HANDLER(xzr, x6)
	mov	w0, #0
	ret
END(swapueword32_lse)
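The `_llsc` swap can fail spuriously when the exclusive reservation is lost, so callers are expected to retry on a positive return; -1 still means the access faulted. A hedged sketch of that convention (`swap_with_retry` is illustrative; `swapueword32()` is the resolved entry point declared in sys/systm.h):

```c
#include <sys/types.h>
#include <sys/systm.h>

/* Retry a user-word swap until it either succeeds or faults. */
static int
swap_with_retry(volatile uint32_t *u, uint32_t *val)
{
	int rv;

	do {
		rv = swapueword32(u, val);	/* old value comes back in *val */
	} while (rv > 0);			/* lost the LL/SC reservation */
	return (rv);				/* 0 on success, -1 on fault */
}
```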
/*
 * int casueword32_llsc(volatile uint32_t *, uint32_t, uint32_t *, uint32_t)
 */
ENTRY(casueword32_llsc)
	check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb
	adr	x6, fsu_fault		/* Load the fault handler */
	mov	w5, #1
	SET_FAULT_HANDLER(x6, x4)	/* And set it */
	ENTER_USER_ACCESS(w6, x4)

	ldxr	w4, [x0]		/* Load-exclusive the data */
	cmp	w4, w1			/* Compare */
	b.ne	1f			/* Not equal, exit */
	stxr	w5, w3, [x0]		/* Store the new data */
1:	EXIT_USER_ACCESS(w6)
	SET_FAULT_HANDLER(xzr, x6)	/* Reset the fault handler */

	str	w4, [x2]		/* Store the read data */
	mov	w0, w5			/* Result same as store status */
	ret				/* Return */
END(casueword32_llsc)

/*
 * int casueword32_lse(volatile uint32_t *, uint32_t, uint32_t *, uint32_t)
 */
ENTRY(casueword32_lse)
	check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb
	adr	x6, fsu_fault		/* Load the fault handler */
	SET_FAULT_HANDLER(x6, x4)	/* And set it */
	ENTER_USER_ACCESS(w6, x4)

	mov	w7, w1			/* Back up the compare value */
	.arch_extension lse
	cas	w1, w3, [x0]		/* Compare and Swap */
	.arch_extension nolse
	cmp	w1, w7			/* Check if successful */
	cset	w0, ne			/* Return 0 on success, 1 on failure */
	EXIT_USER_ACCESS(w6)
	SET_FAULT_HANDLER(xzr, x6)	/* Reset the fault handler */

	str	w1, [x2]		/* Store the read data */
	ret				/* Return */
END(casueword32_lse)

/*
 * int casueword_llsc(volatile u_long *, u_long, u_long *, u_long)
 */
ENTRY(casueword_llsc)
	check_user_access 0, (VM_MAXUSER_ADDRESS-7), fsu_fault_nopcb
	adr	x6, fsu_fault		/* Load the fault handler */
	mov	w5, #1
	SET_FAULT_HANDLER(x6, x4)	/* And set it */
	ENTER_USER_ACCESS(w6, x4)

	ldxr	x4, [x0]		/* Load-exclusive the data */
	cmp	x4, x1			/* Compare */
	b.ne	1f			/* Not equal, exit */
	stxr	w5, x3, [x0]		/* Store the new data */
1:	EXIT_USER_ACCESS(w6)
	SET_FAULT_HANDLER(xzr, x6)	/* Reset the fault handler */

	str	x4, [x2]		/* Store the read data */
	mov	w0, w5			/* Result same as store status */
	ret				/* Return */
END(casueword_llsc)

/*
 * int casueword_lse(volatile u_long *, u_long, u_long *, u_long)
 */
ENTRY(casueword_lse)
	check_user_access 0, (VM_MAXUSER_ADDRESS-7), fsu_fault_nopcb
	adr	x6, fsu_fault		/* Load the fault handler */
	SET_FAULT_HANDLER(x6, x4)	/* And set it */
	ENTER_USER_ACCESS(w6, x4)

	mov	x7, x1			/* Back up the compare value */
	.arch_extension lse
	cas	x1, x3, [x0]		/* Compare and Swap */
	.arch_extension nolse
	cmp	x1, x7			/* Check if successful */
	cset	w0, ne			/* Return 0 on success, 1 on failure */
	EXIT_USER_ACCESS(w6)
	SET_FAULT_HANDLER(xzr, x6)	/* Reset the fault handler */

	str	x1, [x2]		/* Store the read data */
	ret				/* Return */
END(casueword_lse)

.macro fsudata insn, ret_reg, user_arg
	adr	x7, fsu_fault		/* Load the fault handler */
	SET_FAULT_HANDLER(x7, x6)	/* And set it */
	\insn	\ret_reg, [x\user_arg]	/* Try accessing the data */
	SET_FAULT_HANDLER(xzr, x6)	/* Reset the fault handler */
.endm

/*
 * int fubyte(volatile const void *)
 */
ENTRY(fubyte)
	check_user_access 0, (VM_MAXUSER_ADDRESS), fsu_fault_nopcb
	fsudata	ldtrb, w0, 0
	ret				/* Return */
END(fubyte)

/*
 * int fuword16(volatile const void *)
 */
ENTRY(fuword16)
	check_user_access 0, (VM_MAXUSER_ADDRESS-1), fsu_fault_nopcb
	fsudata	ldtrh, w0, 0
	ret				/* Return */
END(fuword16)

/*
 * int fueword32(volatile const void *, int32_t *)
 */
ENTRY(fueword32)
	check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb
	fsudata	ldtr, w0, 0
	str	w0, [x1]		/* Save the data in kernel space */
	mov	w0, #0			/* Success */
	ret				/* Return */
END(fueword32)

/*
 * int fueword(volatile const void *, long *)
 * int fueword64(volatile const void *, int64_t *)
 */
EENTRY(fueword64)
ENTRY(fueword)
	check_user_access 0, (VM_MAXUSER_ADDRESS-7), fsu_fault_nopcb
	fsudata	ldtr, x0, 0
	str	x0, [x1]		/* Save the data in kernel space */
	mov	x0, #0			/* Success */
	ret				/* Return */
END(fueword)
EEND(fueword64)
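casueword32() follows the same return convention as the swaps: 0 on success, 1 when the comparison failed (with `*oldvalp` refreshed from memory), -1 on fault. The usual read-modify-write loop, sketched with a hypothetical helper:

```c
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>

/* Atomically OR a bit into a userspace word (illustrative helper). */
static int
set_bit_in_user_word(volatile uint32_t *u, uint32_t bit)
{
	uint32_t old;
	int rv;

	if (fueword32(u, (int32_t *)&old) == -1)
		return (EFAULT);
	do {
		rv = casueword32(u, old, &old, old | bit);
	} while (rv == 1);		/* raced; 'old' was refreshed, retry */
	return (rv == 0 ? 0 : EFAULT);
}
```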
/*
 * int subyte(volatile void *, int)
 */
ENTRY(subyte)
	check_user_access 0, (VM_MAXUSER_ADDRESS), fsu_fault_nopcb
	fsudata	sttrb, w1, 0
	mov	x0, #0			/* Success */
	ret				/* Return */
END(subyte)

/*
 * int suword16(volatile void *, int)
 */
ENTRY(suword16)
	check_user_access 0, (VM_MAXUSER_ADDRESS-1), fsu_fault_nopcb
	fsudata	sttrh, w1, 0
	mov	x0, #0			/* Success */
	ret				/* Return */
END(suword16)

/*
 * int suword32(volatile void *, int)
 */
ENTRY(suword32)
	check_user_access 0, (VM_MAXUSER_ADDRESS-3), fsu_fault_nopcb
	fsudata	sttr, w1, 0
	mov	x0, #0			/* Success */
	ret				/* Return */
END(suword32)

/*
 * int suword(volatile void *, long)
 */
EENTRY(suword64)
ENTRY(suword)
	check_user_access 0, (VM_MAXUSER_ADDRESS-7), fsu_fault_nopcb
	fsudata	sttr, x1, 0
	mov	x0, #0			/* Success */
	ret				/* Return */
END(suword)
EEND(suword64)

ENTRY(setjmp)
	/* Store the stack pointer */
	mov	x8, sp
	str	x8, [x0], #8

	/* Store the general purpose registers and lr */
	stp	x19, x20, [x0], #16
	stp	x21, x22, [x0], #16
	stp	x23, x24, [x0], #16
	stp	x25, x26, [x0], #16
	stp	x27, x28, [x0], #16
	stp	x29, lr, [x0], #16

	/* Return value */
	mov	x0, #0
	ret
END(setjmp)

ENTRY(longjmp)
	/* Restore the stack pointer */
	ldr	x8, [x0], #8
	mov	sp, x8

	/* Restore the general purpose registers and lr */
	ldp	x19, x20, [x0], #16
	ldp	x21, x22, [x0], #16
	ldp	x23, x24, [x0], #16
	ldp	x25, x26, [x0], #16
	ldp	x27, x28, [x0], #16
	ldp	x29, lr, [x0], #16

	/* Load the return value */
	mov	x0, x1
	ret
END(longjmp)

/*
 * pagezero, simple implementation
 */
ENTRY(pagezero_simple)
	add	x1, x0, #PAGE_SIZE

1:	stp	xzr, xzr, [x0], #0x10
	stp	xzr, xzr, [x0], #0x10
	stp	xzr, xzr, [x0], #0x10
	stp	xzr, xzr, [x0], #0x10
	cmp	x0, x1
	b.ne	1b
	ret
END(pagezero_simple)

/*
 * pagezero, cache assisted
 */
ENTRY(pagezero_cache)
	add	x1, x0, #PAGE_SIZE

	adrp	x2, dczva_line_size
	ldr	x2, [x2, :lo12:dczva_line_size]

1:	dc	zva, x0
	add	x0, x0, x2
	cmp	x0, x1
	b.ne	1b
	ret
END(pagezero_cache)

GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
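The fueword*() variants exist because the older fu* calls return the datum itself, making a legitimate -1 indistinguishable from a fault; fueword*() returns the status and the data separately. A hedged sketch (`read_user_long` is illustrative, not kernel API):

```c
#include <sys/types.h>
#include <sys/errno.h>
#include <sys/systm.h>

static int
read_user_long(volatile const void *uaddr, long *out)
{
	int64_t v;

	if (fueword64(uaddr, &v) == -1)	/* -1 here can only mean a fault */
		return (EFAULT);
	*out = (long)v;			/* v == -1 is a perfectly valid datum */
	return (0);
}
```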
diff --git a/sys/arm64/include/vmparam.h b/sys/arm64/include/vmparam.h
index db3af1881282..c30ca1b2bff4 100644
--- a/sys/arm64/include/vmparam.h
+++ b/sys/arm64/include/vmparam.h
@@ -1,337 +1,340 @@
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: FreeBSD: src/sys/i386/include/vmparam.h,v 1.33 2000/03/30
 */

#ifdef __arm__
#include <arm/vmparam.h>
#else /* !__arm__ */

#ifndef _MACHINE_VMPARAM_H_
#define _MACHINE_VMPARAM_H_

/*
 * Virtual memory related constants, all in bytes
 */
#ifndef MAXTSIZ
#define	MAXTSIZ		(1*1024*1024*1024)	/* max text size */
#endif
#ifndef DFLDSIZ
#define	DFLDSIZ		(128*1024*1024)		/* initial data size limit */
#endif
#ifndef MAXDSIZ
#define	MAXDSIZ		(1*1024*1024*1024)	/* max data size */
#endif
#ifndef DFLSSIZ
#define	DFLSSIZ		(128*1024*1024)		/* initial stack size limit */
#endif
#ifndef MAXSSIZ
#define	MAXSSIZ		(1*1024*1024*1024)	/* max stack size */
#endif
#ifndef SGROWSIZ
#define	SGROWSIZ	(128*1024)		/* amount to grow stack */
#endif

/*
 * The physical address space is sparsely populated.
 */
#define	VM_PHYSSEG_SPARSE

/*
 * The number of PHYSSEG entries.
 */
#define	VM_PHYSSEG_MAX	64

/*
 * Create three free page pools: VM_FREEPOOL_DEFAULT is the default pool from
 * which physical pages are allocated and VM_FREEPOOL_DIRECT is the pool from
 * which physical pages for page tables and small UMA objects are allocated.
 * VM_FREEPOOL_LAZYINIT is a special-purpose pool that is populated only
 * during boot and is used to implement deferred initialization of page
 * structures.
 */
#define	VM_NFREEPOOL		3
#define	VM_FREEPOOL_LAZYINIT	0
#define	VM_FREEPOOL_DEFAULT	1
#define	VM_FREEPOOL_DIRECT	2

/*
 * Create two free page lists: VM_FREELIST_DMA32 is for physical pages that
 * have physical addresses below 4G, and VM_FREELIST_DEFAULT is for all other
 * physical pages.
 */
#define	VM_NFREELIST		2
#define	VM_FREELIST_DEFAULT	0
#define	VM_FREELIST_DMA32	1

/*
 * When PAGE_SIZE is 4KB, an allocation size of 16MB is supported in order
 * to optimize the use of the direct map by UMA. Specifically, a 64-byte
 * cache line contains at most 8 L2 BLOCK entries, collectively mapping 16MB
 * of physical memory. By reducing the number of distinct 16MB "pages" that
 * are used by UMA, the physical memory allocator reduces the likelihood of
 * both 2MB page TLB misses and cache misses during the page table walk when
 * a 2MB page TLB miss does occur.
 *
 * When PAGE_SIZE is 16KB, an allocation size of 32MB is supported. This
 * size is used by level 0 reservations and L2 BLOCK mappings.
 */
#if PAGE_SIZE == PAGE_SIZE_4K
#define	VM_NFREEORDER		13
#elif PAGE_SIZE == PAGE_SIZE_16K
#define	VM_NFREEORDER		12
#else
#error Unsupported page size
#endif

/*
 * Enable superpage reservations: 2 levels.
 */
#ifndef	VM_NRESERVLEVEL
#define	VM_NRESERVLEVEL		2
#endif

/*
 * Level 0 reservations consist of 16 pages when PAGE_SIZE is 4KB, and 128
 * pages when PAGE_SIZE is 16KB. Level 1 reservations consist of 32 64KB
 * pages when PAGE_SIZE is 4KB, and 16 2M pages when PAGE_SIZE is 16KB.
 */
#if PAGE_SIZE == PAGE_SIZE_4K
#ifndef	VM_LEVEL_0_ORDER
#define	VM_LEVEL_0_ORDER	4
#endif
#ifndef	VM_LEVEL_1_ORDER
#define	VM_LEVEL_1_ORDER	5
#endif
#elif PAGE_SIZE == PAGE_SIZE_16K
#ifndef	VM_LEVEL_0_ORDER
#define	VM_LEVEL_0_ORDER	7
#endif
#ifndef	VM_LEVEL_1_ORDER
#define	VM_LEVEL_1_ORDER	4
#endif
#else
#error Unsupported page size
#endif
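The quoted reservation sizes follow directly from the orders above; the standalone asserts below (illustrative only, not kernel code) check the arithmetic:

```c
#include <assert.h>

#define SZ_4K	4096UL
#define SZ_16K	16384UL

/* 4KB pages: level 0 = 2^4 pages = 64KB; level 1 = 2^5 * 64KB = 2MB */
static_assert((SZ_4K << 4) == 64UL * 1024, "L0, 4K pages");
static_assert((SZ_4K << 4 << 5) == 2UL * 1024 * 1024, "L1, 4K pages");

/* 16KB pages: level 0 = 2^7 pages = 2MB; level 1 = 2^4 * 2MB = 32MB */
static_assert((SZ_16K << 7) == 2UL * 1024 * 1024, "L0, 16K pages");
static_assert((SZ_16K << 7 << 4) == 32UL * 1024 * 1024, "L1, 16K pages");
```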
/**
 * Address space layout.
 *
 * ARMv8 implements up to a 48 bit virtual address space. The address space is
 * split into 2 regions at each end of the 64 bit address space, with an
 * out of range "hole" in the middle.
 *
 * We use the full 48 bits for each region, however the kernel may only use
 * a limited range within this space.
 *
 * Upper region:    0xffffffffffffffff  Top of virtual memory
 *
 *                  0xfffffeffffffffff  End of DMAP
 *                  0xffffa00000000000  Start of DMAP
 *
 *                  0xffff027fffffffff  End of KMSAN origin map
 *                  0xffff020000000000  Start of KMSAN origin map
 *
 *                  0xffff017fffffffff  End of KMSAN shadow map
 *                  0xffff010000000000  Start of KMSAN shadow map
 *
 *                  0xffff009fffffffff  End of KASAN shadow map
 *                  0xffff008000000000  Start of KASAN shadow map
 *
 *                  0xffff007fffffffff  End of KVA
 *                  0xffff000000000000  Kernel base address & start of KVA
 *
 * Hole:            0xfffeffffffffffff
 *                  0x0001000000000000
 *
 * Lower region:    0x0000ffffffffffff  End of user address space
 *                  0x0000000000000000  Start of user address space
 *
 * We use the upper region for the kernel, and the lower region for userland.
 *
 * We define some interesting address constants:
 *
 * VM_MIN_ADDRESS and VM_MAX_ADDRESS define the start and end of the entire
 * 64 bit address space, mostly just for convenience.
 *
 * VM_MIN_KERNEL_ADDRESS and VM_MAX_KERNEL_ADDRESS define the start and end of
 * mappable kernel virtual address space.
 *
 * VM_MIN_USER_ADDRESS and VM_MAX_USER_ADDRESS define the start and end of the
 * user address space.
 */
#define	VM_MIN_ADDRESS		(0x0000000000000000UL)
#define	VM_MAX_ADDRESS		(0xffffffffffffffffUL)

/* 512 GiB of kernel addresses */
#define	VM_MIN_KERNEL_ADDRESS	(0xffff000000000000UL)
#define	VM_MAX_KERNEL_ADDRESS	(0xffff008000000000UL)

/* 128 GiB KASAN shadow map */
#define	KASAN_MIN_ADDRESS	(0xffff008000000000UL)
#define	KASAN_MAX_ADDRESS	(0xffff00a000000000UL)

/* 512 GiB KMSAN shadow map */
#define	KMSAN_SHAD_MIN_ADDRESS	(0xffff010000000000UL)
#define	KMSAN_SHAD_MAX_ADDRESS	(0xffff018000000000UL)

/* 512 GiB KMSAN origin map */
#define	KMSAN_ORIG_MIN_ADDRESS	(0xffff020000000000UL)
#define	KMSAN_ORIG_MAX_ADDRESS	(0xffff028000000000UL)

/* The address bits that hold a pointer authentication code */
#define	PAC_ADDR_MASK		(0xff7f000000000000UL)

+/* The top-byte ignore address bits */
+#define	TBI_ADDR_MASK		0xff00000000000000UL
+
/* If true addr is in the kernel address space */
#define	ADDR_IS_KERNEL(addr)	(((addr) & (1ul << 55)) == (1ul << 55))
/* If true addr is in the user address space */
#define	ADDR_IS_USER(addr)	(((addr) & (1ul << 55)) == 0)
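Bit 55 classifies an address independently of any TBI tag in bits 63:56, which is why the copy routines can mask with TBI_ADDR_MASK before the bounds check. A small standalone demonstration (constants copied from the header; the program itself is illustrative):

```c
#include <inttypes.h>
#include <stdio.h>

#define TBI_ADDR_MASK		0xff00000000000000UL
#define ADDR_IS_KERNEL(addr)	(((addr) & (1UL << 55)) == (1UL << 55))
#define ADDR_IS_USER(addr)	(((addr) & (1UL << 55)) == 0)

int
main(void)
{
	/* A user pointer carrying the TBI tag 0x2a in its top byte. */
	uint64_t tagged = 0x2a00001234567890UL;

	printf("user: %d\n", ADDR_IS_USER(tagged));	/* prints 1 */
	printf("kernel: %d\n", ADDR_IS_KERNEL(tagged));	/* prints 0 */
	printf("masked: 0x%" PRIx64 "\n", tagged & ~TBI_ADDR_MASK);
	return (0);
}
```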
/* If true addr is in its canonical form (i.e. no TBI, PAC, etc.) */
#define	ADDR_IS_CANONICAL(addr)	\
    (((addr) & 0xffff000000000000UL) == 0 || \
     ((addr) & 0xffff000000000000UL) == 0xffff000000000000UL)
#define	ADDR_MAKE_CANONICAL(addr) ({			\
	__typeof(addr) _tmp_addr = (addr);		\
							\
	_tmp_addr &= ~0xffff000000000000UL;		\
	if (ADDR_IS_KERNEL(addr))			\
		_tmp_addr |= 0xffff000000000000UL;	\
							\
	_tmp_addr;					\
})

/* 95 TiB maximum for the direct map region */
#define	DMAP_MIN_ADDRESS	(0xffffa00000000000UL)
#define	DMAP_MAX_ADDRESS	(0xffffff0000000000UL)

#define	DMAP_MIN_PHYSADDR	(dmap_phys_base)
#define	DMAP_MAX_PHYSADDR	(dmap_phys_max)

/*
 * Checks to see if a physical address is in the DMAP range.
 * - PHYS_IN_DMAP_RANGE will return true for an address that is within the
 *   DMAP range but may not be accessible through the DMAP, e.g. device
 *   memory between two DMAP physical address regions.
 * - PHYS_IN_DMAP will check if the DMAP address is mapped before returning
 *   true.
 *
 * PHYS_IN_DMAP_RANGE should only be used when a check on the address is
 * performed, e.g. by checking the physical address is within phys_avail,
 * or checking the virtual address is mapped.
 */
#define	PHYS_IN_DMAP_RANGE(pa)	((pa) >= DMAP_MIN_PHYSADDR && \
    (pa) < DMAP_MAX_PHYSADDR)
#define	PHYS_IN_DMAP(pa)	(PHYS_IN_DMAP_RANGE(pa) && \
    pmap_klookup(PHYS_TO_DMAP(pa), NULL))
/* True if va is in the dmap range */
#define	VIRT_IN_DMAP(va)	((va) >= DMAP_MIN_ADDRESS && \
    (va) < (dmap_max_addr))

#define	PMAP_HAS_DMAP	1
#define	PHYS_TO_DMAP(pa)						\
({									\
	KASSERT(PHYS_IN_DMAP_RANGE(pa),					\
	    ("%s: PA out of range, PA: 0x%lx", __func__,		\
	    (vm_paddr_t)(pa)));						\
	((pa) - dmap_phys_base) + DMAP_MIN_ADDRESS;			\
})

#define	DMAP_TO_PHYS(va)						\
({									\
	KASSERT(VIRT_IN_DMAP(va),					\
	    ("%s: VA out of range, VA: 0x%lx", __func__,		\
	    (vm_offset_t)(va)));					\
	((va) - DMAP_MIN_ADDRESS) + dmap_phys_base;			\
})

#define	VM_MIN_USER_ADDRESS	(0x0000000000000000UL)
#define	VM_MAX_USER_ADDRESS	(0x0001000000000000UL)

#define	VM_MINUSER_ADDRESS	(VM_MIN_USER_ADDRESS)
#define	VM_MAXUSER_ADDRESS	(VM_MAX_USER_ADDRESS)

#define	KERNBASE		(VM_MIN_KERNEL_ADDRESS)
#define	SHAREDPAGE		(VM_MAXUSER_ADDRESS - PAGE_SIZE)
#define	USRSTACK		SHAREDPAGE

/*
 * How many physical pages per kmem arena virtual page.
 */
#ifndef	VM_KMEM_SIZE_SCALE
#define	VM_KMEM_SIZE_SCALE	(1)
#endif

/*
 * Optional ceiling (in bytes) on the size of the kmem arena: 60% of the
 * kernel map.
 */
#ifndef	VM_KMEM_SIZE_MAX
#define	VM_KMEM_SIZE_MAX	((VM_MAX_KERNEL_ADDRESS - \
    VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
#endif

/*
 * Initial pagein size of beginning of executable file.
 */
#ifndef	VM_INITIAL_PAGEIN
#define	VM_INITIAL_PAGEIN	16
#endif

#if !defined(KASAN) && !defined(KMSAN)
#define	UMA_USE_DMAP
#endif

#ifndef	LOCORE
extern vm_paddr_t dmap_phys_base;
extern vm_paddr_t dmap_phys_max;
extern vm_offset_t dmap_max_addr;
#endif

#define	ZERO_REGION_SIZE	(64 * 1024)	/* 64KB */

#define	DEVMAP_MAX_VADDR	VM_MAX_KERNEL_ADDRESS

/*
 * The pmap can create non-transparent large page mappings.
 */
#define	PMAP_HAS_LARGEPAGES	1

/*
 * Need a page dump array for minidump.
 */
#define	MINIDUMP_PAGE_TRACKING	1
#define	MINIDUMP_STARTUP_PAGE_TRACKING 1

#endif /* !_MACHINE_VMPARAM_H_ */
#endif /* !__arm__ */
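Finally, a userspace re-implementation of ADDR_MAKE_CANONICAL is an easy way to sanity-check its behaviour for tagged pointers; `make_canonical` below mirrors the statement expression above under the same bit-55 convention and is illustrative only:

```c
#include <assert.h>
#include <stdint.h>

#define ADDR_IS_KERNEL(addr)	(((addr) & (1UL << 55)) == (1UL << 55))

static uint64_t
make_canonical(uint64_t addr)
{
	uint64_t t = addr & ~0xffff000000000000UL;	/* strip TBI/PAC bits */

	if (ADDR_IS_KERNEL(addr))
		t |= 0xffff000000000000UL;		/* re-extend kernel VAs */
	return (t);
}

int
main(void)
{
	/* A tagged kernel pointer canonicalises into the upper region... */
	assert(make_canonical(0x2aff001234567890UL) == 0xffff001234567890UL);
	/* ...and a tagged user pointer into the lower region. */
	assert(make_canonical(0x2a00001234567890UL) == 0x0000001234567890UL);
	return (0);
}
```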