diff --git a/sys/arm64/arm64/db_trace.c b/sys/arm64/arm64/db_trace.c
--- a/sys/arm64/arm64/db_trace.c
+++ b/sys/arm64/arm64/db_trace.c
@@ -56,7 +56,7 @@
 	dbg_show_watchpoint();
 }
 
-static void
+static void __nosanitizeaddress
 db_stack_trace_cmd(struct thread *td, struct unwind_state *frame)
 {
 	c_db_sym_t sym;
@@ -135,7 +135,7 @@
 	}
 }
 
-int
+int __nosanitizeaddress
 db_trace_thread(struct thread *thr, int count)
 {
 	struct unwind_state frame;
@@ -152,7 +152,7 @@
 	return (0);
 }
 
-void
+void __nosanitizeaddress
 db_trace_self(void)
 {
 	struct unwind_state frame;
diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S
--- a/sys/arm64/arm64/locore.S
+++ b/sys/arm64/arm64/locore.S
@@ -153,6 +153,17 @@
 	str	x27, [x0, #BP_KERN_TTBR0]
 	str	x23, [x0, #BP_BOOT_EL]
 
+#ifdef KASAN
+	/* Save bootparams */
+	mov	x19, x0
+
+	/* Bootstrap an early shadow map for the boot stack. */
+	bl	pmap_san_bootstrap
+
+	/* Restore bootparams */
+	mov	x0, x19
+#endif
+
 	/* trace back starts here */
 	mov	fp, #0
 	/* Branch to C code */
diff --git a/sys/arm64/arm64/machdep.c b/sys/arm64/arm64/machdep.c
--- a/sys/arm64/arm64/machdep.c
+++ b/sys/arm64/arm64/machdep.c
@@ -34,6 +34,7 @@
 #include
 #include
+#include <sys/asan.h>
 #include
 #include
 #include
@@ -825,6 +826,18 @@
 	/* Exclude entries needed in the DMAP region, but not phys_avail */
 	if (efihdr != NULL)
 		exclude_efi_map_entries(efihdr);
+
+	/*
+	 * We carefully bootstrap the sanitizer map after we've excluded
+	 * absolutely everything else that could impact phys_avail.  There's
+	 * not always enough room for the initial shadow map after the kernel,
+	 * so we'll end up searching for segments that we can safely use.
+	 * Those segments also get excluded from phys_avail.
+	 */
+#if defined(KASAN)
+	pmap_bootstrap_san(KERNBASE - abp->kern_delta);
+#endif
+
 	physmem_init_kernel_globals();
 
 	devmap_bootstrap(0, NULL);
@@ -868,6 +881,7 @@
 	pan_enable();
 
 	kcsan_cpu_init(0);
+	kasan_init();
 
 	env = kern_getenv("kernelname");
 	if (env != NULL)
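
[Note, not part of the patch: the locore.S hook above has to run before the
first instrumented C code executes, because compiler-generated prologues
already poison stack red zones and therefore need shadow memory behind the
boot stack; kasan_init() in machdep.c then turns the runtime on once the full
shadow map exists. A minimal sketch of what the MI kasan_init_early() is
assumed to do, mirroring the MD hook added in sys/arm64/include/asan.h further
down; the actual sys/kern/subr_asan.c implementation may differ:]

	/* Sketch only: give the boot stack shadow backing, nothing more. */
	void
	kasan_init_early(vm_offset_t stack, size_t size)
	{
		/* On arm64 this resolves to kasan_shadow_map(stack, size). */
		kasan_md_init_early(stack, size);
	}
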
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -108,6 +108,7 @@
 #include "opt_vm.h"
 
 #include
+#include <sys/asan.h>
 #include
 #include
 #include
@@ -146,6 +147,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -182,6 +184,10 @@
 #define	pmap_l1_pindex(v)	(NUL2E + ((v) >> L1_SHIFT))
 #define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)
 
+/* TODO: verify PTE bits. */
+#define	PMAP_SAN_PTE_BITS	(ATTR_DEFAULT | ATTR_S1_XN | \
+    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
+
 static struct md_page *
 pa_to_pvh(vm_paddr_t pa)
 {
@@ -827,6 +833,7 @@
 	u_int l1_slot;
 	u_int l2_slot;
 	vm_offset_t freemempos;
+	bool bootstrapped;
 };
 
 static void
@@ -869,7 +876,8 @@
 	u_int l1_slot;
 
 	/* Make sure there is a valid L0 -> L1 table */
-	pmap_bootstrap_dmap_l0_table(state);
+	if (!state->bootstrapped)
+		pmap_bootstrap_dmap_l0_table(state);
 
 	/* Link the level 1 table to a level 2 table */
 	l1_slot = pmap_l1_index(state->va);
@@ -903,7 +911,8 @@
 	u_int l2_slot;
 
 	/* Make sure there is a valid L1 -> L2 table */
-	pmap_bootstrap_dmap_l1_table(state);
+	if (!state->bootstrapped)
+		pmap_bootstrap_dmap_l1_table(state);
 
 	/* Link the level 2 table to a level 3 table */
 	l2_slot = pmap_l2_index(state->va);
@@ -936,7 +945,8 @@
 		return;
 
 	/* Make sure there is a valid L1 table */
-	pmap_bootstrap_dmap_l1_table(state);
+	if (!state->bootstrapped)
+		pmap_bootstrap_dmap_l1_table(state);
 
 	MPASS((state->va & L2_OFFSET) == 0);
 	for (first = true;
@@ -971,7 +981,8 @@
 		return;
 
 	/* Make sure there is a valid L2 table */
-	pmap_bootstrap_dmap_l2_table(state);
+	if (!state->bootstrapped)
+		pmap_bootstrap_dmap_l2_table(state);
 
 	MPASS((state->va & L3_OFFSET) == 0);
 	for (first = true;
@@ -1126,6 +1137,43 @@
 	return l3pt;
 }
 
+#ifdef KASAN
+static void
+pmap_bootstrap_allocate_kasan_l2(vm_paddr_t start_pa, vm_paddr_t end_pa,
+    vm_offset_t *start_va, int *nkasan_l2)
+{
+	int i;
+	vm_paddr_t pa;
+	vm_offset_t va;
+	pd_entry_t *l2;
+
+	va = *start_va;
+	pa = rounddown2(end_pa - L2_SIZE, L2_SIZE);
+	l2 = pmap_l2(kernel_pmap, va);
+
+	for (i = 0; pa >= start_pa && i < *nkasan_l2;
+	    i++, va += L2_SIZE, pa -= L2_SIZE, l2++) {
+		/*
+		 * KASAN stack checking results in us having already allocated
+		 * part of our shadow map, so we can just skip those segments.
+		 */
+		if ((pmap_load(l2) & ATTR_DESCR_VALID) != 0) {
+			pa += L2_SIZE;
+			continue;
+		}
+
+		/* Zero and exclude each block as we consume it. */
+		bzero((void *)PHYS_TO_DMAP(pa), L2_SIZE);
+		physmem_exclude_region(pa, L2_SIZE, EXFLAG_NOALLOC);
+		pmap_store(l2, (pa & ~Ln_TABLE_MASK) | PMAP_SAN_PTE_BITS |
+		    L2_BLOCK);
+	}
+
+	*nkasan_l2 -= i;
+	*start_va = va;
+}
+#endif
+
 /*
  * Bootstrap the system enough to run with virtual memory.
  */
+ */ + for (i = (physmap_idx * 2) - 2; i >= 0 && nkasan_l2 > 0; i -= 2) { + vm_paddr_t plow, phigh; + + /* L2 mappings must be backed by memory that is L2-aligned */ + plow = roundup2(physmap[i], L2_SIZE); + phigh = physmap[i + 1]; + if (plow >= phigh) + continue; + if (kernstart >= plow && kernstart < phigh) + phigh = kernstart; + if (phigh - plow >= L2_SIZE) + pmap_bootstrap_allocate_kasan_l2(plow, phigh, &va, + &nkasan_l2); + } + + if (nkasan_l2 != 0) + panic("Could not find phys region for shadow map"); + + /* + * Done. We should now have a valid shadow address mapped for all KVA + * that has been mapped so far, i.e. KERNBASE to virtual_avail. Thus, + * shadow accesses by the kasan(9) runtime will succeed for this range. + * When the kernel virtual address range is later expanded, as will + * happen in vm_mem_init(), the shadow map will be grown as well. This + * is handled by pmap_san_enter(). + */ + bzero((void *)(uintptr_t)start_va, va - start_va); +} +#endif + /* * Initialize a vm_page's machine-dependent fields. */ @@ -2397,6 +2510,8 @@ addr = roundup2(addr, L2_SIZE); if (addr - 1 >= vm_map_max(kernel_map)) addr = vm_map_max(kernel_map); + if (kernel_vm_end < addr) + kasan_shadow_map(kernel_vm_end, addr - kernel_vm_end); while (kernel_vm_end < addr) { l0 = pmap_l0(kernel_pmap, kernel_vm_end); KASSERT(pmap_load(l0) != 0, @@ -7294,6 +7409,164 @@ return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_THROUGH); } +#if defined(KASAN) +static vm_offset_t pmap_san_early_l0pt; +static vm_paddr_t pmap_san_early_kernstart; +static pd_entry_t *pmap_san_early_l2; + +void __nosanitizeaddress +pmap_san_bootstrap(struct arm64_bootparams *abp) +{ + + pmap_san_early_l0pt = abp->kern_l0pt; + pmap_san_early_kernstart = KERNBASE - abp->kern_delta; + kasan_init_early(abp->kern_stack - (KSTACK_PAGES * PAGE_SIZE), + KSTACK_PAGES * PAGE_SIZE); +} + +#define SAN_BOOTSTRAP_L2_SIZE (1 * L2_SIZE) +#define SAN_BOOTSTRAP_SIZE (2 * PAGE_SIZE) +static vm_offset_t __nosanitizeaddress +pmap_san_enter_bootstrap_alloc_l2(void) +{ + static uint8_t bootstrap_data[SAN_BOOTSTRAP_L2_SIZE] __aligned(L2_SIZE); + static size_t offset = 0; + vm_offset_t addr; + + if (offset + L2_SIZE > sizeof(bootstrap_data)) { + panic("%s: out of memory for the bootstrap shadow map L2 entries", + __func__); + } + + addr = (uintptr_t)&bootstrap_data[offset]; + offset += L2_SIZE; + return (addr); +} + +/* + * SAN L1 + L2 pages, maybe L3 entries later? + */ +static vm_offset_t __nosanitizeaddress +pmap_san_enter_bootstrap_alloc_pages(int npages) +{ + static uint8_t bootstrap_data[SAN_BOOTSTRAP_SIZE] __aligned(PAGE_SIZE); + static size_t offset = 0; + vm_offset_t addr; + + if (offset + (npages * PAGE_SIZE) > sizeof(bootstrap_data)) { + panic("%s: out of memory for the bootstrap shadow map", + __func__); + } + + addr = (uintptr_t)&bootstrap_data[offset]; + offset += (npages * PAGE_SIZE); + return (addr); +} + +static void __nosanitizeaddress +pmap_san_enter_bootstrap(void) +{ + vm_offset_t freemempos; + struct dmap_bootstrap_state state = { + .va = KASAN_MIN_ADDRESS, + .l0_slot = Ln_ENTRIES, + .l1_slot = Ln_ENTRIES, +#if PAGE_SIZE == PAGE_SIZE_16K + /* + * On a 16k kernel, we have two L0 entries and they're both + * already allocated, so we can simply skip it. 
+ */ + .bootstrapped = true, +#endif + }; + + /* L1, L2 */ + freemempos = pmap_san_enter_bootstrap_alloc_pages(2); + state.freemempos = freemempos; + pmap_bootstrap_dmap_l1_table(&state); + pmap_san_early_l2 = state.l2; +} + +static vm_page_t +pmap_san_enter_alloc_l3(void) +{ + vm_page_t m; + + m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED | + VM_ALLOC_ZERO); + if (m == NULL) + panic("%s: no memory to grow shadow map", __func__); + return (m); +} + +static vm_page_t +pmap_san_enter_alloc_l2(void) +{ + return (vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO, + Ln_ENTRIES, 0, ~0ul, L2_SIZE, 0, VM_MEMATTR_DEFAULT)); +} + +void __nosanitizeaddress +pmap_san_enter(vm_offset_t va) +{ + pd_entry_t *l1, *l2; + pt_entry_t *l3; + vm_page_t m; + + if (virtual_avail == 0) { + vm_offset_t block; + int slot; + bool first; + + /* Temporary shadow map prior to pmap_bootstrap(). */ + first = pmap_san_early_l2 == NULL; + if (first) + pmap_san_enter_bootstrap(); + + l2 = pmap_san_early_l2; + slot = pmap_l2_index(va); + + if ((pmap_load(&l2[slot]) & ATTR_DESCR_VALID) == 0) { + MPASS(first); + block = pmap_san_enter_bootstrap_alloc_l2(); + pmap_store(&l2[slot], pmap_early_vtophys(block) | + PMAP_SAN_PTE_BITS | L2_BLOCK); + } + + return; + } + + mtx_assert(&kernel_map->system_mtx, MA_OWNED); + l1 = pmap_l1(kernel_pmap, va); + MPASS(l1 != NULL); + if ((pmap_load(l1) & ATTR_DESCR_VALID) == 0) { + m = pmap_san_enter_alloc_l3(); + pmap_store(l1, (VM_PAGE_TO_PHYS(m) & ~Ln_TABLE_MASK) | + PMAP_SAN_PTE_BITS | L1_TABLE); + } + l2 = pmap_l1_to_l2(l1, va); + if ((pmap_load(l2) & ATTR_DESCR_VALID) == 0) { + m = pmap_san_enter_alloc_l2(); + if (m != NULL) { + pmap_store(l2, VM_PAGE_TO_PHYS(m) | + PMAP_SAN_PTE_BITS | L2_BLOCK); + } else { + m = pmap_san_enter_alloc_l3(); + pmap_store(l2, VM_PAGE_TO_PHYS(m) | + PMAP_SAN_PTE_BITS | L2_TABLE); + } + } + if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) + return; + l3 = pmap_l2_to_l3(l2, va); + if ((pmap_load(l3) & ATTR_DESCR_VALID) != 0) + return; + m = pmap_san_enter_alloc_l3(); + pmap_store(l3, VM_PAGE_TO_PHYS(m) | PMAP_SAN_PTE_BITS | L3_PAGE); + +} +#endif /* KASAN */ + /* * Track a range of the kernel's virtual address space that is contiguous * in various mapping attributes. 
@@ -7462,6 +7735,10 @@
 		sbuf_printf(sb, "\nDirect map:\n");
 	else if (i == pmap_l0_index(VM_MIN_KERNEL_ADDRESS))
 		sbuf_printf(sb, "\nKernel map:\n");
+#ifdef KASAN
+	else if (i == pmap_l0_index(KASAN_MIN_ADDRESS))
+		sbuf_printf(sb, "\nKASAN shadow map:\n");
+#endif
 
 	l0e = kernel_pmap->pm_l0[i];
 	if ((l0e & ATTR_DESCR_VALID) == 0) {
diff --git a/sys/arm64/arm64/stack_machdep.c b/sys/arm64/arm64/stack_machdep.c
--- a/sys/arm64/arm64/stack_machdep.c
+++ b/sys/arm64/arm64/stack_machdep.c
@@ -41,7 +41,7 @@
 #include
 #include
 
-static void
+static void __nosanitizeaddress
 stack_capture(struct thread *td, struct stack *st, struct unwind_state *frame)
 {
 
@@ -56,7 +56,7 @@
 	}
 }
 
-int
+int __nosanitizeaddress
 stack_save_td(struct stack *st, struct thread *td)
 {
 	struct unwind_state frame;
@@ -75,7 +75,7 @@
 	return (0);
 }
 
-void
+void __nosanitizeaddress
 stack_save(struct stack *st)
 {
 	struct unwind_state frame;
diff --git a/sys/arm64/arm64/trap.c b/sys/arm64/arm64/trap.c
--- a/sys/arm64/arm64/trap.c
+++ b/sys/arm64/arm64/trap.c
@@ -32,6 +32,7 @@
 #include
 #include
+#include <sys/asan.h>
 #include
 #include
 #include
@@ -441,6 +442,8 @@
 	uint64_t esr, far;
 	int dfsc;
 
+	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
+
 	/* Read the esr register to get the exception details */
 	esr = frame->tf_esr;
 	exception = ESR_ELx_EXCEPTION(esr);
@@ -547,6 +550,8 @@
 	    ("Invalid pcpu address from userland: %p (tpidr %lx)",
 	    get_pcpu(), READ_SPECIALREG(tpidr_el1)));
 
+	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
+
 	esr = frame->tf_esr;
 	exception = ESR_ELx_EXCEPTION(esr);
 	switch (exception) {
@@ -699,6 +704,8 @@
 {
 	uint64_t esr, far;
 
+	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
+
 	far = READ_SPECIALREG(far_el1);
 	esr = frame->tf_esr;
 
@@ -713,6 +720,8 @@
 {
 	uint64_t esr, far;
 
+	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
+
 	far = READ_SPECIALREG(far_el1);
 	esr = frame->tf_esr;
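
[Note, not part of the patch: the kasan_mark() calls added to the trap handlers
exist because exception entry spills a trapframe onto stack bytes whose shadow
may still carry poison from the interrupted context. Per kasan(9),
kasan_mark(addr, size, redzsize, code) marks [addr, addr + size) as valid and
poisons the remainder up to redzsize with the given code, so:]

	/* Illustration only; this is the semantics of the calls above. */
	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
	/*
	 * Valid size equals total size, so there is no red zone to poison and
	 * the fill code (0, "addressable") is effectively unused: the call
	 * simply revalidates exactly the trapframe's bytes before the
	 * instrumented handler reads them.
	 */
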
+ */ + +#ifndef _MACHINE_ASAN_H_ +#define _MACHINE_ASAN_H_ + +#ifdef KASAN + +#include +#include +#include +#include + +static inline vm_offset_t +kasan_md_addr_to_shad(vm_offset_t addr) +{ + return (((addr - VM_MIN_KERNEL_ADDRESS) >> KASAN_SHADOW_SCALE_SHIFT) + + KASAN_MIN_ADDRESS); +} + +static inline bool +kasan_md_unsupported(vm_offset_t addr) +{ + return (addr < VM_MIN_KERNEL_ADDRESS || addr >= virtual_end); +} + +static inline void +kasan_md_init(void) +{ + +} + +static inline void +kasan_md_init_early(vm_offset_t bootstack, size_t size) +{ + + kasan_shadow_map(bootstack, size); +} + +#endif /* KASAN */ +#endif /* !_MACHINE_ASAN_H_ */ diff --git a/sys/arm64/include/atomic.h b/sys/arm64/include/atomic.h --- a/sys/arm64/include/atomic.h +++ b/sys/arm64/include/atomic.h @@ -53,6 +53,10 @@ #define wmb() dmb(st) /* Full system memory barrier store */ #define rmb() dmb(ld) /* Full system memory barrier load */ +#ifdef _KERNEL +extern bool lse_supported; +#endif + #if defined(SAN_NEEDS_INTERCEPTORS) && !defined(SAN_RUNTIME) #include #else @@ -60,7 +64,6 @@ #include #ifdef _KERNEL -extern bool lse_supported; #ifdef LSE_ATOMICS #define _ATOMIC_LSE_SUPPORTED 1 diff --git a/sys/arm64/include/bus.h b/sys/arm64/include/bus.h --- a/sys/arm64/include/bus.h +++ b/sys/arm64/include/bus.h @@ -92,10 +92,6 @@ #define BUS_SPACE_BARRIER_READ 0x01 #define BUS_SPACE_BARRIER_WRITE 0x02 -#if defined(SAN_NEEDS_INTERCEPTORS) && !defined(SAN_RUNTIME) -#include -#else - struct bus_space { /* cookie */ void *bs_cookie; @@ -282,6 +278,10 @@ bus_size_t, uint64_t); }; +#if defined(SAN_NEEDS_INTERCEPTORS) && !defined(SAN_RUNTIME) +#include +#else + /* * Utility macros; INTERNAL USE ONLY. */ @@ -458,6 +458,15 @@ #define bus_space_set_multi_8(t, h, o, v, c) \ __bs_set(sm,8,(t),(h),(o),(v),(c)) +#define bus_space_set_multi_stream_1(t, h, o, v, c) \ + bus_space_set_multi_1((t), (h), (o), (v), (c)) +#define bus_space_set_multi_stream_2(t, h, o, v, c) \ + bus_space_set_multi_2((t), (h), (o), (v), (c)) +#define bus_space_set_multi_stream_4(t, h, o, v, c) \ + bus_space_set_multi_4((t), (h), (o), (v), (c)) +#define bus_space_set_multi_stream_8(t, h, o, v, c) \ + bus_space_set_multi_8((t), (h), (o), (v), (c)) + /* * Set region operations. */ @@ -470,6 +479,15 @@ #define bus_space_set_region_8(t, h, o, v, c) \ __bs_set(sr,8,(t),(h),(o),(v),(c)) +#define bus_space_set_region_stream_1(t, h, o, v, c) \ + bus_space_set_region_1((t), (h), (o), (v), (c)) +#define bus_space_set_region_stream_2(t, h, o, v, c) \ + bus_space_set_region_2((t), (h), (o), (v), (c)) +#define bus_space_set_region_stream_4(t, h, o, v, c) \ + bus_space_set_region_4((t), (h), (o), (v), (c)) +#define bus_space_set_region_stream_8(t, h, o, v, c) \ + bus_space_set_region_8((t), (h), (o), (v), (c)) + /* * Copy operations. 
diff --git a/sys/arm64/include/bus.h b/sys/arm64/include/bus.h
--- a/sys/arm64/include/bus.h
+++ b/sys/arm64/include/bus.h
@@ -92,10 +92,6 @@
 #define	BUS_SPACE_BARRIER_READ	0x01
 #define	BUS_SPACE_BARRIER_WRITE	0x02
 
-#if defined(SAN_NEEDS_INTERCEPTORS) && !defined(SAN_RUNTIME)
-#include <sys/bus_san.h>
-#else
-
 struct bus_space {
 	/* cookie */
 	void	*bs_cookie;
@@ -282,6 +278,10 @@
 	    bus_size_t, uint64_t);
 };
 
+#if defined(SAN_NEEDS_INTERCEPTORS) && !defined(SAN_RUNTIME)
+#include <sys/bus_san.h>
+#else
+
 /*
  * Utility macros; INTERNAL USE ONLY.
  */
@@ -458,6 +458,15 @@
 #define	bus_space_set_multi_8(t, h, o, v, c)				\
 	__bs_set(sm,8,(t),(h),(o),(v),(c))
 
+#define	bus_space_set_multi_stream_1(t, h, o, v, c)			\
+	bus_space_set_multi_1((t), (h), (o), (v), (c))
+#define	bus_space_set_multi_stream_2(t, h, o, v, c)			\
+	bus_space_set_multi_2((t), (h), (o), (v), (c))
+#define	bus_space_set_multi_stream_4(t, h, o, v, c)			\
+	bus_space_set_multi_4((t), (h), (o), (v), (c))
+#define	bus_space_set_multi_stream_8(t, h, o, v, c)			\
+	bus_space_set_multi_8((t), (h), (o), (v), (c))
+
 /*
  * Set region operations.
  */
@@ -470,6 +479,15 @@
 #define	bus_space_set_region_8(t, h, o, v, c)				\
 	__bs_set(sr,8,(t),(h),(o),(v),(c))
 
+#define	bus_space_set_region_stream_1(t, h, o, v, c)			\
+	bus_space_set_region_1((t), (h), (o), (v), (c))
+#define	bus_space_set_region_stream_2(t, h, o, v, c)			\
+	bus_space_set_region_2((t), (h), (o), (v), (c))
+#define	bus_space_set_region_stream_4(t, h, o, v, c)			\
+	bus_space_set_region_4((t), (h), (o), (v), (c))
+#define	bus_space_set_region_stream_8(t, h, o, v, c)			\
+	bus_space_set_region_8((t), (h), (o), (v), (c))
+
 /*
  * Copy operations.
  */
diff --git a/sys/arm64/include/param.h b/sys/arm64/include/param.h
--- a/sys/arm64/include/param.h
+++ b/sys/arm64/include/param.h
@@ -99,8 +99,12 @@
 #define	MAXPAGESIZES	3	/* maximum number of supported page sizes */
 
 #ifndef KSTACK_PAGES
+#if defined(KASAN) || defined(KMSAN)
+#define	KSTACK_PAGES	12
+#else
 #define	KSTACK_PAGES	4	/* pages of kernel stack (with pcb) */
 #endif
+#endif
 
 #define	KSTACK_GUARD_PAGES	1	/* pages of kstack guard; 0 disables */
 #define	PCPU_PAGES	1
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -215,6 +215,15 @@
 	return (0);
 }
 
+#if defined(KASAN)
+struct arm64_bootparams;
+
+void	pmap_bootstrap_san(vm_paddr_t);
+void	pmap_san_enter(vm_offset_t);
+void	pmap_san_bootstrap(struct arm64_bootparams *);
+
+#endif
+
 #endif	/* _KERNEL */
 
 #endif	/* !LOCORE */
diff --git a/sys/arm64/include/vmparam.h b/sys/arm64/include/vmparam.h
--- a/sys/arm64/include/vmparam.h
+++ b/sys/arm64/include/vmparam.h
@@ -125,7 +125,10 @@
  * Upper region:	0xffffffffffffffff  Top of virtual memory
  *
  *			0xfffffeffffffffff  End of DMAP
- *			0xfffffa0000000000  Start of DMAP
+ *			0xffffa00000000000  Start of DMAP
+ *
+ *			0xffff009fffffffff  End of KASAN shadow map
+ *			0xffff008000000000  Start of KASAN shadow map
 *
  *			0xffff007fffffffff  End of KVA
  *			0xffff000000000000  Kernel base address & start of KVA
@@ -156,6 +159,10 @@
 #define	VM_MIN_KERNEL_ADDRESS	(0xffff000000000000UL)
 #define	VM_MAX_KERNEL_ADDRESS	(0xffff008000000000UL)
 
+/* 128 GiB KASAN shadow map */
+#define	KASAN_MIN_ADDRESS	(0xffff008000000000UL)
+#define	KASAN_MAX_ADDRESS	(0xffff00a000000000UL)
+
 /* The address bits that hold a pointer authentication code */
 #define	PAC_ADDR_MASK	(0xff7f000000000000UL)
 
@@ -239,7 +246,9 @@
 #define	VM_INITIAL_PAGEIN	16
 #endif
 
+#if !defined(KASAN) && !defined(KMSAN)
 #define	UMA_MD_SMALL_ALLOC
+#endif
 
 #ifndef	LOCORE
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -79,7 +79,8 @@
 arm64/arm64/uio_machdep.c	standard
 arm64/arm64/uma_machdep.c	standard
 arm64/arm64/undefined.c		standard
-arm64/arm64/unwind.c		optional ddb | kdtrace_hooks | stack
+arm64/arm64/unwind.c		optional ddb | kdtrace_hooks | stack \
+	compile-with "${NORMAL_C:N-fsanitize*}"
 arm64/arm64/vfp.c		standard
 arm64/arm64/vm_machdep.c	standard
diff --git a/sys/conf/kern.pre.mk b/sys/conf/kern.pre.mk
--- a/sys/conf/kern.pre.mk
+++ b/sys/conf/kern.pre.mk
@@ -102,6 +102,17 @@
 		-mllvm -asan-use-after-scope=true \
 		-mllvm -asan-instrumentation-with-call-threshold=0 \
 		-mllvm -asan-instrument-byval=false
+
+.if ${MACHINE_CPUARCH} == "aarch64"
+# KASAN/ARM64 TODO: -asan-mapping-offset is calculated from:
+#	(VM_MIN_KERNEL_ADDRESS >> KASAN_SHADOW_SCALE_SHIFT) + $offset =
+#	    KASAN_MIN_ADDRESS
+#
+# This is different from amd64, where we have a different
+# KASAN_MIN_ADDRESS, and this offset value should eventually be
+# upstreamed, similar to: https://reviews.llvm.org/D98285
+#
+SAN_CFLAGS+=	-mllvm -asan-mapping-offset=0xdfff208000000000
+.endif
 .endif
 
 KCSAN_ENABLED!=	grep KCSAN opt_global.h || true ; echo
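
[Note, not part of the patch: the -asan-mapping-offset value can be verified
from the constants in this diff. The compiler emits shadow = (addr >> 3) +
offset, and the kernel needs that to equal kasan_md_addr_to_shad(addr), so
offset = KASAN_MIN_ADDRESS - (VM_MIN_KERNEL_ADDRESS >> 3). As a runnable
check:]

	#include <assert.h>

	int
	main(void)
	{
		const unsigned long kasan_min = 0xffff008000000000UL;
		const unsigned long kva_min = 0xffff000000000000UL;

		/* 0xffff008000000000 - 0x1fffe00000000000 == the flag value. */
		assert(kasan_min - (kva_min >> 3) == 0xdfff208000000000UL);
		return (0);
	}
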
diff --git a/sys/dev/usb/controller/musb_otg_allwinner.c b/sys/dev/usb/controller/musb_otg_allwinner.c
--- a/sys/dev/usb/controller/musb_otg_allwinner.c
+++ b/sys/dev/usb/controller/musb_otg_allwinner.c
@@ -45,6 +45,7 @@
 #include
 #include
 #include
+
 #include
 #include
@@ -245,7 +246,7 @@
 static uint8_t
 awusbdrd_bs_r_1(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o)
 {
-	const struct bus_space *bs = t;
+	struct bus_space *bs = t;
 
 	switch (o) {
 	case MUSB2_REG_HWVERS:
@@ -273,7 +274,7 @@
 static uint16_t
 awusbdrd_bs_r_2(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o)
 {
-	const struct bus_space *bs = t;
+	struct bus_space *bs = t;
 
 	if (awusbdrd_filt(o) != 0)
 		return (0);
@@ -284,7 +285,7 @@
 awusbdrd_bs_w_1(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o,
     uint8_t v)
 {
-	const struct bus_space *bs = t;
+	struct bus_space *bs = t;
 
 	if (awusbdrd_filt(o) != 0)
 		return;
@@ -296,7 +297,7 @@
 awusbdrd_bs_w_2(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o,
     uint16_t v)
 {
-	const struct bus_space *bs = t;
+	struct bus_space *bs = t;
 
 	if (awusbdrd_filt(o) != 0)
 		return;
@@ -308,7 +309,7 @@
 awusbdrd_bs_rm_1(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o,
     uint8_t *d, bus_size_t c)
 {
-	const struct bus_space *bs = t;
+	struct bus_space *bs = t;
 
 	bus_space_read_multi_1(bs_parent_space(bs), h, awusbdrd_reg(o), d, c);
 }
@@ -317,7 +318,7 @@
 awusbdrd_bs_rm_4(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o,
     uint32_t *d, bus_size_t c)
 {
-	const struct bus_space *bs = t;
+	struct bus_space *bs = t;
 
 	bus_space_read_multi_4(bs_parent_space(bs), h, awusbdrd_reg(o), d, c);
 }
@@ -326,7 +327,7 @@
 awusbdrd_bs_wm_1(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o,
     const uint8_t *d, bus_size_t c)
 {
-	const struct bus_space *bs = t;
+	struct bus_space *bs = t;
 
 	if (awusbdrd_filt(o) != 0)
 		return;
@@ -338,7 +339,7 @@
 awusbdrd_bs_wm_4(awusb_bs_tag t, bus_space_handle_t h, bus_size_t o,
     const uint32_t *d, bus_size_t c)
 {
-	const struct bus_space *bs = t;
+	struct bus_space *bs = t;
 
 	if (awusbdrd_filt(o) != 0)
 		return;
diff --git a/sys/kern/subr_asan.c b/sys/kern/subr_asan.c
--- a/sys/kern/subr_asan.c
+++ b/sys/kern/subr_asan.c
@@ -257,6 +257,9 @@
 	size_t i, n, redz;
 	int8_t *shad;
 
+	if (__predict_false(!kasan_enabled))
+		return;
+
 	if ((vm_offset_t)addr >= DMAP_MIN_ADDRESS &&
 	    (vm_offset_t)addr < DMAP_MAX_ADDRESS)
 		return;
diff --git a/sys/kern/subr_intr.c b/sys/kern/subr_intr.c
--- a/sys/kern/subr_intr.c
+++ b/sys/kern/subr_intr.c
@@ -42,6 +42,7 @@
 #include
 #include
+#include <sys/asan.h>
 #include
 #include
 #include
@@ -319,6 +320,8 @@
 
 	KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));
 
+	kasan_mark(tf, sizeof(*tf), sizeof(*tf), 0);
+
 	VM_CNT_INC(v_intr);
 	critical_enter();
 	td = curthread;
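
[Note, not part of the patch: the kasan_enabled gate added to subr_asan.c above
is what keeps instrumented code safe between the first instruction and
kasan_init(). A minimal sketch of how that flag is assumed to be flipped,
mirroring sys/kern/subr_asan.c; the real function may do more:]

	/* Sketch only: called near the end of initarm() in this diff. */
	void
	kasan_init(void)
	{
		/* MD setup; a no-op on arm64 per machine/asan.h above. */
		kasan_md_init();

		/* From this point on, shadow checks are live. */
		kasan_enabled = true;
	}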