diff --git a/sys/arm64/arm64/db_trace.c b/sys/arm64/arm64/db_trace.c
--- a/sys/arm64/arm64/db_trace.c
+++ b/sys/arm64/arm64/db_trace.c
@@ -56,7 +56,7 @@
 	dbg_show_watchpoint();
 }
 
-static void
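+/*
+ * The ddb stack tracer walks the stacks of arbitrary threads, performing
+ * reads that KASAN could flag as invalid; exempt these functions from
+ * instrumentation.
+ */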
+static void __nosanitizeaddress
 db_stack_trace_cmd(struct thread *td, struct unwind_state *frame)
 {
 	c_db_sym_t sym;
@@ -135,7 +135,7 @@
 	}
 }
 
-int
+int __nosanitizeaddress
 db_trace_thread(struct thread *thr, int count)
 {
 	struct unwind_state frame;
@@ -152,7 +152,7 @@
 	return (0);
 }
 
-void
+void __nosanitizeaddress
 db_trace_self(void)
 {
 	struct unwind_state frame;
diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S
--- a/sys/arm64/arm64/locore.S
+++ b/sys/arm64/arm64/locore.S
@@ -148,6 +148,17 @@
 	str	x23, [x0, #BP_BOOT_EL]
 	str	x4,  [x0, #BP_HCR_EL2]
 
+#ifdef KASAN
+	/* Save bootparams */
+	mov	x19, x0
+
+	/* Bootstrap an early shadow map for the boot stack. */
+	bl	pmap_san_bootstrap
+
+	/* Restore bootparams */
+	mov	x0, x19
+#endif
+
 	/* trace back starts here */
 	mov	fp, #0
 	/* Branch to C code */
diff --git a/sys/arm64/arm64/machdep.c b/sys/arm64/arm64/machdep.c
--- a/sys/arm64/arm64/machdep.c
+++ b/sys/arm64/arm64/machdep.c
@@ -34,6 +34,7 @@
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/asan.h>
 #include <sys/buf.h>
 #include <sys/bus.h>
 #include <sys/cons.h>
@@ -955,6 +956,18 @@
 	/*  Do the same for reserve entries in the EFI MEMRESERVE table */
 	if (efi_systbl_phys != 0)
 		exclude_efi_memreserve(efi_systbl_phys);
+
+	/*
+	 * We carefully bootstrap the sanitizer map after we've excluded
+	 * absolutely everything else that could impact phys_avail.  There's not
+	 * always enough room for the initial shadow map after the kernel, so
+	 * we'll end up searching for segments that we can safely use.  Those
+	 * segments also get excluded from phys_avail.
+	 */
+#if defined(KASAN)
+	pmap_bootstrap_san(KERNBASE - abp->kern_delta);
+#endif
+
 	physmem_init_kernel_globals();
 
 	devmap_bootstrap(0, NULL);
@@ -998,6 +1011,7 @@
 	pan_enable();
 
 	kcsan_cpu_init(0);
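+	/* The shadow map is fully bootstrapped; bring KASAN online. */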
+	kasan_init();
 
 	env = kern_getenv("kernelname");
 	if (env != NULL)
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -108,6 +108,7 @@
 #include "opt_vm.h"
 
 #include <sys/param.h>
+#include <sys/asan.h>
 #include <sys/bitstring.h>
 #include <sys/bus.h>
 #include <sys/systm.h>
@@ -146,6 +147,7 @@
 #include <vm/vm_dumpset.h>
 #include <vm/uma.h>
 
+#include <machine/asan.h>
 #include <machine/machdep.h>
 #include <machine/md_var.h>
 #include <machine/pcb.h>
@@ -190,6 +192,9 @@
 #define	pmap_l1_pindex(v)	(NUL2E + ((v) >> L1_SHIFT))
 #define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)
 
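+/*
+ * Shadow map PTE attributes: normal write-back memory, read/write,
+ * execute-never.
+ */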
+#define	PMAP_SAN_PTE_BITS	(ATTR_DEFAULT | ATTR_S1_XN |	\
+	ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | ATTR_S1_AP(ATTR_S1_AP_RW))
+
 struct pmap_large_md_page {
 	struct rwlock   pv_lock;
 	struct md_page  pv_page;
@@ -1211,6 +1216,54 @@
 		pmap_bootstrap_l2_table(&bs_state);
 }
 
+#ifdef KASAN
+static void
+pmap_bootstrap_allocate_kasan_l2(vm_paddr_t start_pa, vm_paddr_t end_pa,
+    vm_offset_t *start_va, int *nkasan_l2)
+{
+	int i;
+	vm_paddr_t pa;
+	vm_offset_t va;
+	pd_entry_t *l2;
+
+	va = *start_va;
+	pa = rounddown2(end_pa - L2_SIZE, L2_SIZE);
+	l2 = pmap_l2(kernel_pmap, va);
+
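+	/*
+	 * Walk downward from the end of the physical region, mapping one L2
+	 * block of shadow at each successive virtual address.
+	 */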
+	for (i = 0; pa >= start_pa && i < *nkasan_l2;
+	    i++, va += L2_SIZE, pa -= L2_SIZE, l2++) {
+		/*
+		 * KASAN stack checking results in us having already allocated
+		 * part of our shadow map, so we can just skip those segments.
+		 */
+		if ((pmap_load(l2) & ATTR_DESCR_VALID) != 0) {
+			pa += L2_SIZE;
+			continue;
+		}
+
+		pmap_store(l2, (pa & ~Ln_TABLE_MASK) | PMAP_SAN_PTE_BITS |
+		    L2_BLOCK);
+	}
+
+	/*
+	 * The loop's final increment leaves pa one L2 block below the lowest
+	 * block we mapped, whether it ended due to the start_pa constraint or
+	 * because we allocated everything we needed.  Step pa back up so that
+	 * the zeroing and exclusion below cover exactly the blocks allocated
+	 * here.
+	 */
+	pa += L2_SIZE;
+
+	bzero((void *)PHYS_TO_DMAP(pa), i * L2_SIZE);
+	physmem_exclude_region(pa, i * L2_SIZE, EXFLAG_NOALLOC);
+
+	*nkasan_l2 -= i;
+	*start_va = va;
+}
+#endif
+
 /*
  *	Bootstrap the system enough to run with virtual memory.
  */
@@ -1312,6 +1365,68 @@
 	cpu_tlb_flushID();
 }
 
+#if defined(KASAN)
+/*
+ * Finish constructing the initial shadow map:
+ * - Count how many pages from KERNBASE to virtual_avail (scaled for
+ *   shadow map)
+ * - Map that entire range using L2 superpages.
+ */
+void
+pmap_bootstrap_san(vm_paddr_t kernstart)
+{
+	vm_offset_t va;
+	int i, shadow_npages, nkasan_l2;
+
+	/*
+	 * Rebuild physmap one more time; we may have excluded more regions from
+	 * allocation since pmap_bootstrap().
+	 */
+	bzero(physmap, sizeof(physmap));
+	physmap_idx = physmem_avail(physmap, nitems(physmap));
+	physmap_idx /= 2;
+
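+	/*
+	 * Each shadow byte covers KASAN_SHADOW_SCALE (8) bytes of KVA, and
+	 * each L2 block provides Ln_ENTRIES pages of shadow.
+	 */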
+	shadow_npages = (virtual_avail - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE;
+	shadow_npages = howmany(shadow_npages, KASAN_SHADOW_SCALE);
+	nkasan_l2 = howmany(shadow_npages, Ln_ENTRIES);
+
+	/* Map the valid KVA up to this point. */
+	va = KASAN_MIN_ADDRESS;
+
+	/*
+	 * Find a slot in the physmap large enough for what we need.  We try
+	 * to put the shadow map as high up as we can to avoid depleting the
+	 * lower 4GB in case it's needed for, e.g., an xhci controller that
+	 * can only do 32-bit DMA.
+	 */
+	for (i = (physmap_idx * 2) - 2; i >= 0 && nkasan_l2 > 0; i -= 2) {
+		vm_paddr_t plow, phigh;
+
+		/* L2 mappings must be backed by memory that is L2-aligned */
+		plow = roundup2(physmap[i], L2_SIZE);
+		phigh = physmap[i + 1];
+		if (plow >= phigh)
+			continue;
+		if (kernstart >= plow && kernstart < phigh)
+			phigh = kernstart;
+		if (phigh - plow >= L2_SIZE)
+			pmap_bootstrap_allocate_kasan_l2(plow, phigh, &va,
+			    &nkasan_l2);
+	}
+
+	if (nkasan_l2 != 0)
+		panic("Could not find phys region for shadow map");
+
+	/*
+	 * Done. We should now have a valid shadow address mapped for all KVA
+	 * that has been mapped so far, i.e., KERNBASE to virtual_avail. Thus,
+	 * shadow accesses by the kasan(9) runtime will succeed for this range.
+	 * When the kernel virtual address range is later expanded, as will
+	 * happen in vm_mem_init(), the shadow map will be grown as well. This
+	 * is handled by pmap_san_enter().
+	 */
+}
+#endif
+
 /*
  *	Initialize a vm_page's machine-dependent fields.
  */
@@ -2580,6 +2695,8 @@
 	addr = roundup2(addr, L2_SIZE);
 	if (addr - 1 >= vm_map_max(kernel_map))
 		addr = vm_map_max(kernel_map);
+	if (kernel_vm_end < addr)
+		kasan_shadow_map(kernel_vm_end, addr - kernel_vm_end);
 	while (kernel_vm_end < addr) {
 		l0 = pmap_l0(kernel_pmap, kernel_vm_end);
 		KASSERT(pmap_load(l0) != 0,
@@ -7556,6 +7673,151 @@
 	return (mode >= VM_MEMATTR_DEVICE && mode <= VM_MEMATTR_WRITE_THROUGH);
 }
 
+#if defined(KASAN)
+static vm_paddr_t	pmap_san_early_kernstart;
+static pd_entry_t	*pmap_san_early_l2;
+
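+/*
+ * Called from locore early in boot: record the kernel's physical load
+ * address and create an early shadow for the boot stack.
+ */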
+void __nosanitizeaddress
+pmap_san_bootstrap(struct arm64_bootparams *abp)
+{
+
+	pmap_san_early_kernstart = KERNBASE - abp->kern_delta;
+	kasan_init_early(abp->kern_stack, KSTACK_PAGES * PAGE_SIZE);
+}
+
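+/*
+ * Statically allocated bootstrap memory: one L2 block to back early shadow
+ * map data and two pages for early shadow map page tables.
+ */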
+#define	SAN_BOOTSTRAP_L2_SIZE	(1 * L2_SIZE)
+#define	SAN_BOOTSTRAP_SIZE	(2 * PAGE_SIZE)
+static vm_offset_t __nosanitizeaddress
+pmap_san_enter_bootstrap_alloc_l2(void)
+{
+	static uint8_t bootstrap_data[SAN_BOOTSTRAP_L2_SIZE] __aligned(L2_SIZE);
+	static size_t offset = 0;
+	vm_offset_t addr;
+
+	if (offset + L2_SIZE > sizeof(bootstrap_data)) {
+		panic("%s: out of memory for the bootstrap shadow map L2 entries",
+		    __func__);
+	}
+
+	addr = (uintptr_t)&bootstrap_data[offset];
+	offset += L2_SIZE;
+	return (addr);
+}
+
+/*
+ * Bump allocator for early shadow map page-table pages: an L1 and an L2
+ * page are needed now, maybe L3 entries later.
+ */
+static vm_offset_t __nosanitizeaddress
+pmap_san_enter_bootstrap_alloc_pages(int npages)
+{
+	static uint8_t bootstrap_data[SAN_BOOTSTRAP_SIZE] __aligned(PAGE_SIZE);
+	static size_t offset = 0;
+	vm_offset_t addr;
+
+	if (offset + (npages * PAGE_SIZE) > sizeof(bootstrap_data)) {
+		panic("%s: out of memory for the bootstrap shadow map",
+		    __func__);
+	}
+
+	addr = (uintptr_t)&bootstrap_data[offset];
+	offset += (npages * PAGE_SIZE);
+	return (addr);
+}
+
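+/* Set up the early page tables for the shadow map region. */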
+static void __nosanitizeaddress
+pmap_san_enter_bootstrap(void)
+{
+	vm_offset_t freemempos;
+
+	/* L1, L2 */
+	freemempos = pmap_san_enter_bootstrap_alloc_pages(2);
+	bs_state.freemempos = freemempos;
+	bs_state.va = KASAN_MIN_ADDRESS;
+	pmap_bootstrap_l1_table(&bs_state);
+	pmap_san_early_l2 = bs_state.l2;
+}
+
+static vm_page_t
+pmap_san_enter_alloc_l3(void)
+{
+	vm_page_t m;
+
+	m = vm_page_alloc_noobj(VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED |
+	    VM_ALLOC_ZERO);
+	if (m == NULL)
+		panic("%s: no memory to grow shadow map", __func__);
+	return (m);
+}
+
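+/* Try for a contiguous L2-sized run; the caller falls back to L3 pages. */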
+static vm_page_t
+pmap_san_enter_alloc_l2(void)
+{
+	return (vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO,
+	    Ln_ENTRIES, 0, ~0ul, L2_SIZE, 0, VM_MEMATTR_DEFAULT));
+}
+
+void __nosanitizeaddress
+pmap_san_enter(vm_offset_t va)
+{
+	pd_entry_t *l1, *l2;
+	pt_entry_t *l3;
+	vm_page_t m;
+
+	if (virtual_avail == 0) {
+		vm_offset_t block;
+		int slot;
+		bool first;
+
+		/* Temporary shadow map prior to pmap_bootstrap(). */
+		first = pmap_san_early_l2 == NULL;
+		if (first)
+			pmap_san_enter_bootstrap();
+
+		l2 = pmap_san_early_l2;
+		slot = pmap_l2_index(va);
+
+		if ((pmap_load(&l2[slot]) & ATTR_DESCR_VALID) == 0) {
+			MPASS(first);
+			block = pmap_san_enter_bootstrap_alloc_l2();
+			pmap_store(&l2[slot], pmap_early_vtophys(block) |
+			    PMAP_SAN_PTE_BITS | L2_BLOCK);
+			dmb(ishst);
+		}
+
+		return;
+	}
+
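+	/*
+	 * After pmap_bootstrap(): grow the shadow map with the kernel map
+	 * locked, preferring L2 block mappings for the backing.
+	 */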
+	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
+	l1 = pmap_l1(kernel_pmap, va);
+	MPASS(l1 != NULL);
+	if ((pmap_load(l1) & ATTR_DESCR_VALID) == 0) {
+		m = pmap_san_enter_alloc_l3();
+		pmap_store(l1, (VM_PAGE_TO_PHYS(m) & ~Ln_TABLE_MASK) |
+		    L1_TABLE);
+	}
+	l2 = pmap_l1_to_l2(l1, va);
+	if ((pmap_load(l2) & ATTR_DESCR_VALID) == 0) {
+		m = pmap_san_enter_alloc_l2();
+		if (m != NULL) {
+			pmap_store(l2, VM_PAGE_TO_PHYS(m) | PMAP_SAN_PTE_BITS |
+			    L2_BLOCK);
+		} else {
+			m = pmap_san_enter_alloc_l3();
+			pmap_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
+		}
+		dmb(ishst);
+	}
+	if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK)
+		return;
+	l3 = pmap_l2_to_l3(l2, va);
+	if ((pmap_load(l3) & ATTR_DESCR_VALID) != 0)
+		return;
+	m = pmap_san_enter_alloc_l3();
+	pmap_store(l3, VM_PAGE_TO_PHYS(m) | PMAP_SAN_PTE_BITS | L3_PAGE);
+	dmb(ishst);
+}
+#endif /* KASAN */
+
 /*
  * Track a range of the kernel's virtual address space that is contiguous
  * in various mapping attributes.
@@ -7724,6 +7986,10 @@
 			sbuf_printf(sb, "\nDirect map:\n");
 		else if (i == pmap_l0_index(VM_MIN_KERNEL_ADDRESS))
 			sbuf_printf(sb, "\nKernel map:\n");
+#ifdef KASAN
+		else if (i == pmap_l0_index(KASAN_MIN_ADDRESS))
+			sbuf_printf(sb, "\nKASAN shadow map:\n");
+#endif
 
 		l0e = kernel_pmap->pm_l0[i];
 		if ((l0e & ATTR_DESCR_VALID) == 0) {
diff --git a/sys/arm64/arm64/trap.c b/sys/arm64/arm64/trap.c
--- a/sys/arm64/arm64/trap.c
+++ b/sys/arm64/arm64/trap.c
@@ -32,6 +32,7 @@
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/asan.h>
 #include <sys/kernel.h>
 #include <sys/ktr.h>
 #include <sys/lock.h>
@@ -465,6 +466,8 @@
 	uint64_t esr, far;
 	int dfsc;
 
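+	/*
+	 * The trap frame was pushed by uninstrumented exception entry code,
+	 * possibly over stale stack poison; mark it valid before instrumented
+	 * code accesses it.
+	 */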
+	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
+
 	/* Read the esr register to get the exception details */
 	esr = frame->tf_esr;
 	exception = ESR_ELx_EXCEPTION(esr);
@@ -571,6 +574,8 @@
 	    ("Invalid pcpu address from userland: %p (tpidr %lx)",
 	     get_pcpu(), READ_SPECIALREG(tpidr_el1)));
 
+	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
+
 	esr = frame->tf_esr;
 	exception = ESR_ELx_EXCEPTION(esr);
 	if (exception == EXCP_INSN_ABORT_L && far > VM_MAXUSER_ADDRESS) {
@@ -711,6 +716,8 @@
 {
 	uint64_t esr, far;
 
+	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
+
 	far = READ_SPECIALREG(far_el1);
 	esr = frame->tf_esr;
 
@@ -725,6 +732,8 @@
 {
 	uint64_t esr, far;
 
+	kasan_mark(frame, sizeof(*frame), sizeof(*frame), 0);
+
 	far = READ_SPECIALREG(far_el1);
 	esr = frame->tf_esr;
 
diff --git a/sys/arm64/include/asan.h b/sys/arm64/include/asan.h
new file mode 100644
--- /dev/null
+++ b/sys/arm64/include/asan.h
@@ -0,0 +1,68 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2020 The FreeBSD Foundation
+ *
+ * This software was developed by Mark Johnston under sponsorship from the
+ * FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_ASAN_H_
+#define	_MACHINE_ASAN_H_
+
+#ifdef KASAN
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_page.h>
+#include <machine/vmparam.h>
+
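+/* Translate a kernel virtual address to its shadow map address. */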
+static inline vm_offset_t
+kasan_md_addr_to_shad(vm_offset_t addr)
+{
+	return (((addr - VM_MIN_KERNEL_ADDRESS) >> KASAN_SHADOW_SCALE_SHIFT) +
+	    KASAN_MIN_ADDRESS);
+}
+
+static inline bool
+kasan_md_unsupported(vm_offset_t addr)
+{
+	return (addr < VM_MIN_KERNEL_ADDRESS || addr >= virtual_end);
+}
+
+static inline void
+kasan_md_init(void)
+{
+
+}
+
+static inline void
+kasan_md_init_early(vm_offset_t bootstack, size_t size)
+{
+
+	kasan_shadow_map(bootstack, size);
+}
+
+#endif /* KASAN */
+#endif /* !_MACHINE_ASAN_H_ */
diff --git a/sys/arm64/include/atomic.h b/sys/arm64/include/atomic.h
--- a/sys/arm64/include/atomic.h
+++ b/sys/arm64/include/atomic.h
@@ -53,6 +53,10 @@
 #define	wmb()	dmb(st)	/* Full system memory barrier store */
 #define	rmb()	dmb(ld)	/* Full system memory barrier load */
 
+#ifdef _KERNEL
+extern bool lse_supported;
+#endif
+
 #if defined(SAN_NEEDS_INTERCEPTORS) && !defined(SAN_RUNTIME)
 #include <sys/atomic_san.h>
 #else
@@ -60,7 +64,6 @@
 #include <sys/atomic_common.h>
 
 #ifdef _KERNEL
-extern bool lse_supported;
 
 #ifdef LSE_ATOMICS
 #define	_ATOMIC_LSE_SUPPORTED	1
diff --git a/sys/arm64/include/param.h b/sys/arm64/include/param.h
--- a/sys/arm64/include/param.h
+++ b/sys/arm64/include/param.h
@@ -99,8 +99,12 @@
 #define	MAXPAGESIZES	3		/* maximum number of supported page sizes */
 
 #ifndef KSTACK_PAGES
+#if defined(KASAN) || defined(KMSAN)
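+/* Sanitizer instrumentation inflates stack usage, so allow extra room. */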
+#define	KSTACK_PAGES	6
+#else
 #define	KSTACK_PAGES	4	/* pages of kernel stack (with pcb) */
 #endif
+#endif
 
 #define	KSTACK_GUARD_PAGES	1	/* pages of kstack guard; 0 disables */
 #define	PCPU_PAGES		1
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -183,6 +183,14 @@
 	return (0);
 }
 
+#if defined(KASAN) || defined(KMSAN)
+struct arm64_bootparams;
+
+void	pmap_bootstrap_san(vm_paddr_t);
+void	pmap_san_enter(vm_offset_t);
+void	pmap_san_bootstrap(struct arm64_bootparams *);
+#endif
+
 #endif	/* _KERNEL */
 
 #endif	/* !LOCORE */
diff --git a/sys/arm64/include/vmparam.h b/sys/arm64/include/vmparam.h
--- a/sys/arm64/include/vmparam.h
+++ b/sys/arm64/include/vmparam.h
@@ -125,7 +125,10 @@
  * Upper region:    0xffffffffffffffff  Top of virtual memory
  *
  *                  0xfffffeffffffffff  End of DMAP
- *                  0xfffffa0000000000  Start of DMAP
+ *                  0xffffa00000000000  Start of DMAP
+ *
+ *                  0xffff009fffffffff  End of KASAN shadow map
+ *                  0xffff008000000000  Start of KASAN shadow map
  *
  *                  0xffff007fffffffff  End of KVA
  *                  0xffff000000000000  Kernel base address & start of KVA
@@ -156,6 +159,10 @@
 #define	VM_MIN_KERNEL_ADDRESS	(0xffff000000000000UL)
 #define	VM_MAX_KERNEL_ADDRESS	(0xffff008000000000UL)
 
+/* 128 GiB KASAN shadow map */
+#define	KASAN_MIN_ADDRESS	(0xffff008000000000UL)
+#define	KASAN_MAX_ADDRESS	(0xffff00a000000000UL)
+
 /* The address bits that hold a pointer authentication code */
 #define	PAC_ADDR_MASK		(0xff7f000000000000UL)
 
@@ -239,7 +246,9 @@
 #define	VM_INITIAL_PAGEIN	16
 #endif
 
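+/*
+ * The KASAN and KMSAN shadow maps do not cover the direct map, so force
+ * UMA small allocations through the kernel map where they can be validated.
+ */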
+#if !defined(KASAN) && !defined(KMSAN)
 #define	UMA_MD_SMALL_ALLOC
+#endif
 
 #ifndef LOCORE
 
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -80,7 +80,8 @@
 arm64/arm64/uio_machdep.c			standard
 arm64/arm64/uma_machdep.c			standard
 arm64/arm64/undefined.c				standard
-arm64/arm64/unwind.c				optional ddb | kdtrace_hooks | stack
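+# unwind.c walks raw stack frames, so it is built without sanitizer
+# instrumentation.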
+arm64/arm64/unwind.c				optional ddb | kdtrace_hooks | stack \
+	compile-with "${NORMAL_C:N-fsanitize*}"
 arm64/arm64/vfp.c				standard
 arm64/arm64/vm_machdep.c			standard
 
diff --git a/sys/conf/kern.pre.mk b/sys/conf/kern.pre.mk
--- a/sys/conf/kern.pre.mk
+++ b/sys/conf/kern.pre.mk
@@ -102,6 +102,17 @@
 		-mllvm -asan-use-after-scope=true \
 		-mllvm -asan-instrumentation-with-call-threshold=0 \
 		-mllvm -asan-instrument-byval=false
+
+.if ${MACHINE_CPUARCH} == "aarch64"
+# KASAN/ARM64 TODO: -asan-mapping-offset is calculated from:
+#	   (VM_MIN_KERNEL_ADDRESS >> KASAN_SHADOW_SCALE_SHIFT) + $offset = KASAN_MIN_ADDRESS
+#
+#	This differs from amd64, which has a different KASAN_MIN_ADDRESS;
+#	the offset value should eventually be upstreamed, similar to:
+#	https://reviews.llvm.org/D98285
+#
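+#	With KASAN_SHADOW_SCALE_SHIFT == 3 this gives:
+#	(0xffff000000000000 >> 3) + 0xdfff208000000000 = 0xffff008000000000,
+#	which is KASAN_MIN_ADDRESS.
+#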
+SAN_CFLAGS+=	-mllvm -asan-mapping-offset=0xdfff208000000000
+.endif
 .endif
 
 KCSAN_ENABLED!=	grep KCSAN opt_global.h || true ; echo
diff --git a/sys/kern/subr_intr.c b/sys/kern/subr_intr.c
--- a/sys/kern/subr_intr.c
+++ b/sys/kern/subr_intr.c
@@ -42,6 +42,7 @@
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/asan.h>
 #include <sys/bitstring.h>
 #include <sys/bus.h>
 #include <sys/conf.h>
@@ -345,6 +346,8 @@
 
 	KASSERT(irq_root_filter != NULL, ("%s: no filter", __func__));
 
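+	/* Unpoison the interrupt frame written by uninstrumented asm. */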
+	kasan_mark(tf, sizeof(*tf), sizeof(*tf), 0);
+
 	VM_CNT_INC(v_intr);
 	critical_enter();
 	td = curthread;