diff --git a/sys/kern/subr_asan.c b/sys/kern/subr_asan.c index 7083a8e64540..5441d7be39a1 100644 --- a/sys/kern/subr_asan.c +++ b/sys/kern/subr_asan.c @@ -1,1097 +1,1180 @@ /* $NetBSD: subr_asan.c,v 1.26 2020/09/10 14:10:46 maxv Exp $ */ /* * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net * All rights reserved. * * This code is part of the KASAN subsystem of the NetBSD kernel. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #define SAN_RUNTIME #include __FBSDID("$FreeBSD$"); #if 0 __KERNEL_RCSID(0, "$NetBSD: subr_asan.c,v 1.26 2020/09/10 14:10:46 maxv Exp $"); #endif #include #include #include #include #include #include #include /* ASAN constants. Part of the compiler ABI. */ #define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE - 1) #define KASAN_ALLOCA_SCALE_SIZE 32 /* ASAN ABI version. 
*/ #if defined(__clang__) && (__clang_major__ - 0 >= 6) #define ASAN_ABI_VERSION 8 #elif __GNUC_PREREQ__(7, 1) && !defined(__clang__) #define ASAN_ABI_VERSION 8 #elif __GNUC_PREREQ__(6, 1) && !defined(__clang__) #define ASAN_ABI_VERSION 6 #else #error "Unsupported compiler version" #endif #define __RET_ADDR (unsigned long)__builtin_return_address(0) /* Global variable descriptor. Part of the compiler ABI. */ struct __asan_global_source_location { const char *filename; int line_no; int column_no; }; struct __asan_global { const void *beg; /* address of the global variable */ size_t size; /* size of the global variable */ size_t size_with_redzone; /* size with the redzone */ const void *name; /* name of the variable */ const void *module_name; /* name of the module where the var is declared */ unsigned long has_dynamic_init; /* the var has dyn initializer (c++) */ struct __asan_global_source_location *location; #if ASAN_ABI_VERSION >= 7 uintptr_t odr_indicator; /* the address of the ODR indicator symbol */ #endif }; FEATURE(kasan, "Kernel address sanitizer"); static SYSCTL_NODE(_debug, OID_AUTO, kasan, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "KASAN options"); static int panic_on_violation = 1; SYSCTL_INT(_debug_kasan, OID_AUTO, panic_on_violation, CTLFLAG_RDTUN, &panic_on_violation, 0, "Panic if an invalid access is detected"); static bool kasan_enabled __read_mostly = false; /* -------------------------------------------------------------------------- */ void kasan_shadow_map(vm_offset_t addr, size_t size) { size_t sz, npages, i; vm_offset_t sva, eva; KASSERT(addr % KASAN_SHADOW_SCALE == 0, ("%s: invalid address %#lx", __func__, addr)); sz = roundup(size, KASAN_SHADOW_SCALE) / KASAN_SHADOW_SCALE; sva = kasan_md_addr_to_shad(addr); eva = kasan_md_addr_to_shad(addr) + sz; sva = rounddown(sva, PAGE_SIZE); eva = roundup(eva, PAGE_SIZE); npages = (eva - sva) / PAGE_SIZE; KASSERT(sva >= KASAN_MIN_ADDRESS && eva < KASAN_MAX_ADDRESS, ("%s: invalid address range %#lx-%#lx", 
__func__, sva, eva)); for (i = 0; i < npages; i++) pmap_kasan_enter(sva + ptoa(i)); } void kasan_init(void) { int disabled; disabled = 0; TUNABLE_INT_FETCH("debug.kasan.disabled", &disabled); if (disabled) return; /* MD initialization. */ kasan_md_init(); /* Now officially enabled. */ kasan_enabled = true; } static inline const char * kasan_code_name(uint8_t code) { switch (code) { case KASAN_GENERIC_REDZONE: return "GenericRedZone"; case KASAN_MALLOC_REDZONE: return "MallocRedZone"; case KASAN_KMEM_REDZONE: return "KmemRedZone"; case KASAN_UMA_FREED: return "UMAUseAfterFree"; case KASAN_KSTACK_FREED: return "KernelStack"; case KASAN_EXEC_ARGS_FREED: return "ExecKVA"; case 1 ... 7: return "RedZonePartial"; case KASAN_STACK_LEFT: return "StackLeft"; case KASAN_STACK_MID: return "StackMiddle"; case KASAN_STACK_RIGHT: return "StackRight"; case KASAN_USE_AFTER_RET: return "UseAfterRet"; case KASAN_USE_AFTER_SCOPE: return "UseAfterScope"; default: return "Unknown"; } } #define REPORT(f, ...) do { \ if (panic_on_violation) { \ kasan_enabled = false; \ panic(f, __VA_ARGS__); \ } else { \ struct stack st; \ \ stack_save(&st); \ printf(f "\n", __VA_ARGS__); \ stack_print_ddb(&st); \ } \ } while (0) static void kasan_report(unsigned long addr, size_t size, bool write, unsigned long pc, uint8_t code) { REPORT("ASan: Invalid access, %zu-byte %s at %#lx, %s(%x)", size, (write ? 
"write" : "read"), addr, kasan_code_name(code), code); } static __always_inline void kasan_shadow_1byte_markvalid(unsigned long addr) { int8_t *byte = (int8_t *)kasan_md_addr_to_shad(addr); int8_t last = (addr & KASAN_SHADOW_MASK) + 1; *byte = last; } static __always_inline void kasan_shadow_Nbyte_markvalid(const void *addr, size_t size) { size_t i; for (i = 0; i < size; i++) { kasan_shadow_1byte_markvalid((unsigned long)addr + i); } } static __always_inline void kasan_shadow_Nbyte_fill(const void *addr, size_t size, uint8_t code) { void *shad; if (__predict_false(size == 0)) return; if (__predict_false(kasan_md_unsupported((vm_offset_t)addr))) return; KASSERT((vm_offset_t)addr % KASAN_SHADOW_SCALE == 0, ("%s: invalid address %p", __func__, addr)); KASSERT(size % KASAN_SHADOW_SCALE == 0, ("%s: invalid size %zu", __func__, size)); shad = (void *)kasan_md_addr_to_shad((uintptr_t)addr); size = size >> KASAN_SHADOW_SCALE_SHIFT; __builtin_memset(shad, code, size); } /* * In an area of size 'sz_with_redz', mark the 'size' first bytes as valid, * and the rest as invalid. There are generally two use cases: * * o kasan_mark(addr, origsize, size, code), with origsize < size. This marks * the redzone at the end of the buffer as invalid. If the entire is to be * marked invalid, origsize will be 0. * * o kasan_mark(addr, size, size, 0). This marks the entire buffer as valid. 
*/ void kasan_mark(const void *addr, size_t size, size_t redzsize, uint8_t code) { size_t i, n, redz; int8_t *shad; if ((vm_offset_t)addr >= DMAP_MIN_ADDRESS && (vm_offset_t)addr < DMAP_MAX_ADDRESS) return; KASSERT((vm_offset_t)addr >= VM_MIN_KERNEL_ADDRESS && (vm_offset_t)addr < VM_MAX_KERNEL_ADDRESS, ("%s: invalid address %p", __func__, addr)); KASSERT((vm_offset_t)addr % KASAN_SHADOW_SCALE == 0, ("%s: invalid address %p", __func__, addr)); redz = redzsize - roundup(size, KASAN_SHADOW_SCALE); KASSERT(redz % KASAN_SHADOW_SCALE == 0, ("%s: invalid size %zu", __func__, redz)); shad = (int8_t *)kasan_md_addr_to_shad((uintptr_t)addr); /* Chunks of 8 bytes, valid. */ n = size / KASAN_SHADOW_SCALE; for (i = 0; i < n; i++) { *shad++ = 0; } /* Possibly one chunk, mid. */ if ((size & KASAN_SHADOW_MASK) != 0) { *shad++ = (size & KASAN_SHADOW_MASK); } /* Chunks of 8 bytes, invalid. */ n = redz / KASAN_SHADOW_SCALE; for (i = 0; i < n; i++) { *shad++ = code; } } /* -------------------------------------------------------------------------- */ #define ADDR_CROSSES_SCALE_BOUNDARY(addr, size) \ (addr >> KASAN_SHADOW_SCALE_SHIFT) != \ ((addr + size - 1) >> KASAN_SHADOW_SCALE_SHIFT) static __always_inline bool kasan_shadow_1byte_isvalid(unsigned long addr, uint8_t *code) { int8_t *byte = (int8_t *)kasan_md_addr_to_shad(addr); int8_t last = (addr & KASAN_SHADOW_MASK) + 1; if (__predict_true(*byte == 0 || last <= *byte)) { return (true); } *code = *byte; return (false); } static __always_inline bool kasan_shadow_2byte_isvalid(unsigned long addr, uint8_t *code) { int8_t *byte, last; if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 2)) { return (kasan_shadow_1byte_isvalid(addr, code) && kasan_shadow_1byte_isvalid(addr+1, code)); } byte = (int8_t *)kasan_md_addr_to_shad(addr); last = ((addr + 1) & KASAN_SHADOW_MASK) + 1; if (__predict_true(*byte == 0 || last <= *byte)) { return (true); } *code = *byte; return (false); } static __always_inline bool kasan_shadow_4byte_isvalid(unsigned long addr, 
uint8_t *code) { int8_t *byte, last; if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 4)) { return (kasan_shadow_2byte_isvalid(addr, code) && kasan_shadow_2byte_isvalid(addr+2, code)); } byte = (int8_t *)kasan_md_addr_to_shad(addr); last = ((addr + 3) & KASAN_SHADOW_MASK) + 1; if (__predict_true(*byte == 0 || last <= *byte)) { return (true); } *code = *byte; return (false); } static __always_inline bool kasan_shadow_8byte_isvalid(unsigned long addr, uint8_t *code) { int8_t *byte, last; if (ADDR_CROSSES_SCALE_BOUNDARY(addr, 8)) { return (kasan_shadow_4byte_isvalid(addr, code) && kasan_shadow_4byte_isvalid(addr+4, code)); } byte = (int8_t *)kasan_md_addr_to_shad(addr); last = ((addr + 7) & KASAN_SHADOW_MASK) + 1; if (__predict_true(*byte == 0 || last <= *byte)) { return (true); } *code = *byte; return (false); } static __always_inline bool kasan_shadow_Nbyte_isvalid(unsigned long addr, size_t size, uint8_t *code) { size_t i; for (i = 0; i < size; i++) { if (!kasan_shadow_1byte_isvalid(addr+i, code)) return (false); } return (true); } static __always_inline void kasan_shadow_check(unsigned long addr, size_t size, bool write, unsigned long retaddr) { uint8_t code; bool valid; if (__predict_false(!kasan_enabled)) return; if (__predict_false(size == 0)) return; if (__predict_false(kasan_md_unsupported(addr))) return; if (__predict_false(panicstr != NULL)) return; if (__builtin_constant_p(size)) { switch (size) { case 1: valid = kasan_shadow_1byte_isvalid(addr, &code); break; case 2: valid = kasan_shadow_2byte_isvalid(addr, &code); break; case 4: valid = kasan_shadow_4byte_isvalid(addr, &code); break; case 8: valid = kasan_shadow_8byte_isvalid(addr, &code); break; default: valid = kasan_shadow_Nbyte_isvalid(addr, size, &code); break; } } else { valid = kasan_shadow_Nbyte_isvalid(addr, size, &code); } if (__predict_false(!valid)) { kasan_report(addr, size, write, retaddr, code); } } /* -------------------------------------------------------------------------- */ void * 
kasan_memcpy(void *dst, const void *src, size_t len) { kasan_shadow_check((unsigned long)src, len, false, __RET_ADDR); kasan_shadow_check((unsigned long)dst, len, true, __RET_ADDR); return (__builtin_memcpy(dst, src, len)); } int kasan_memcmp(const void *b1, const void *b2, size_t len) { kasan_shadow_check((unsigned long)b1, len, false, __RET_ADDR); kasan_shadow_check((unsigned long)b2, len, false, __RET_ADDR); return (__builtin_memcmp(b1, b2, len)); } void * kasan_memset(void *b, int c, size_t len) { kasan_shadow_check((unsigned long)b, len, true, __RET_ADDR); return (__builtin_memset(b, c, len)); } void * kasan_memmove(void *dst, const void *src, size_t len) { kasan_shadow_check((unsigned long)src, len, false, __RET_ADDR); kasan_shadow_check((unsigned long)dst, len, true, __RET_ADDR); return (__builtin_memmove(dst, src, len)); } size_t kasan_strlen(const char *str) { const char *s; s = str; while (1) { kasan_shadow_check((unsigned long)s, 1, false, __RET_ADDR); if (*s == '\0') break; s++; } return (s - str); } char * kasan_strcpy(char *dst, const char *src) { char *save = dst; while (1) { kasan_shadow_check((unsigned long)src, 1, false, __RET_ADDR); kasan_shadow_check((unsigned long)dst, 1, true, __RET_ADDR); *dst = *src; if (*src == '\0') break; src++, dst++; } return save; } int kasan_strcmp(const char *s1, const char *s2) { while (1) { kasan_shadow_check((unsigned long)s1, 1, false, __RET_ADDR); kasan_shadow_check((unsigned long)s2, 1, false, __RET_ADDR); if (*s1 != *s2) break; if (*s1 == '\0') return 0; s1++, s2++; } return (*(const unsigned char *)s1 - *(const unsigned char *)s2); } int kasan_copyin(const void *uaddr, void *kaddr, size_t len) { kasan_shadow_check((unsigned long)kaddr, len, true, __RET_ADDR); return (copyin(uaddr, kaddr, len)); } int kasan_copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done) { kasan_shadow_check((unsigned long)kaddr, len, true, __RET_ADDR); return (copyinstr(uaddr, kaddr, len, done)); } int kasan_copyout(const 
void *kaddr, void *uaddr, size_t len) { kasan_shadow_check((unsigned long)kaddr, len, false, __RET_ADDR); return (copyout(kaddr, uaddr, len)); } /* -------------------------------------------------------------------------- */ +int +kasan_fubyte(volatile const void *base) +{ + return (fubyte(base)); +} + +int +kasan_fuword16(volatile const void *base) +{ + return (fuword16(base)); +} + +int +kasan_fueword(volatile const void *base, long *val) +{ + kasan_shadow_check((unsigned long)val, sizeof(*val), true, __RET_ADDR); + return (fueword(base, val)); +} + +int +kasan_fueword32(volatile const void *base, int32_t *val) +{ + kasan_shadow_check((unsigned long)val, sizeof(*val), true, __RET_ADDR); + return (fueword32(base, val)); +} + +int +kasan_fueword64(volatile const void *base, int64_t *val) +{ + kasan_shadow_check((unsigned long)val, sizeof(*val), true, __RET_ADDR); + return (fueword64(base, val)); +} + +int +kasan_subyte(volatile void *base, int byte) +{ + return (subyte(base, byte)); +} + +int +kasan_suword(volatile void *base, long word) +{ + return (suword(base, word)); +} + +int +kasan_suword16(volatile void *base, int word) +{ + return (suword16(base, word)); +} + +int +kasan_suword32(volatile void *base, int32_t word) +{ + return (suword32(base, word)); +} + +int +kasan_suword64(volatile void *base, int64_t word) +{ + return (suword64(base, word)); +} + +int +kasan_casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp, + uint32_t newval) +{ + kasan_shadow_check((unsigned long)oldvalp, sizeof(*oldvalp), true, + __RET_ADDR); + return (casueword32(base, oldval, oldvalp, newval)); +} + +int +kasan_casueword(volatile u_long *base, u_long oldval, u_long *oldvalp, + u_long newval) +{ + kasan_shadow_check((unsigned long)oldvalp, sizeof(*oldvalp), true, + __RET_ADDR); + return (casueword(base, oldval, oldvalp, newval)); +} + +/* -------------------------------------------------------------------------- */ + #include #include #define 
_ASAN_ATOMIC_FUNC_ADD(name, type) \ void kasan_atomic_add_##name(volatile type *ptr, type val) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ atomic_add_##name(ptr, val); \ } #define ASAN_ATOMIC_FUNC_ADD(name, type) \ _ASAN_ATOMIC_FUNC_ADD(name, type) \ _ASAN_ATOMIC_FUNC_ADD(acq_##name, type) \ _ASAN_ATOMIC_FUNC_ADD(rel_##name, type) #define _ASAN_ATOMIC_FUNC_SUBTRACT(name, type) \ void kasan_atomic_subtract_##name(volatile type *ptr, type val) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ atomic_subtract_##name(ptr, val); \ } #define ASAN_ATOMIC_FUNC_SUBTRACT(name, type) \ _ASAN_ATOMIC_FUNC_SUBTRACT(name, type) \ _ASAN_ATOMIC_FUNC_SUBTRACT(acq_##name, type) \ _ASAN_ATOMIC_FUNC_SUBTRACT(rel_##name, type) #define _ASAN_ATOMIC_FUNC_SET(name, type) \ void kasan_atomic_set_##name(volatile type *ptr, type val) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ atomic_set_##name(ptr, val); \ } #define ASAN_ATOMIC_FUNC_SET(name, type) \ _ASAN_ATOMIC_FUNC_SET(name, type) \ _ASAN_ATOMIC_FUNC_SET(acq_##name, type) \ _ASAN_ATOMIC_FUNC_SET(rel_##name, type) #define _ASAN_ATOMIC_FUNC_CLEAR(name, type) \ void kasan_atomic_clear_##name(volatile type *ptr, type val) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ atomic_clear_##name(ptr, val); \ } #define ASAN_ATOMIC_FUNC_CLEAR(name, type) \ _ASAN_ATOMIC_FUNC_CLEAR(name, type) \ _ASAN_ATOMIC_FUNC_CLEAR(acq_##name, type) \ _ASAN_ATOMIC_FUNC_CLEAR(rel_##name, type) #define ASAN_ATOMIC_FUNC_FETCHADD(name, type) \ type kasan_atomic_fetchadd_##name(volatile type *ptr, type val) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ return (atomic_fetchadd_##name(ptr, val)); \ } #define ASAN_ATOMIC_FUNC_READANDCLEAR(name, type) \ type kasan_atomic_readandclear_##name(volatile type *ptr) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ return 
(atomic_readandclear_##name(ptr)); \ } #define ASAN_ATOMIC_FUNC_TESTANDCLEAR(name, type) \ int kasan_atomic_testandclear_##name(volatile type *ptr, u_int v) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ return (atomic_testandclear_##name(ptr, v)); \ } #define ASAN_ATOMIC_FUNC_TESTANDSET(name, type) \ int kasan_atomic_testandset_##name(volatile type *ptr, u_int v) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ return (atomic_testandset_##name(ptr, v)); \ } #define ASAN_ATOMIC_FUNC_SWAP(name, type) \ type kasan_atomic_swap_##name(volatile type *ptr, type val) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ return (atomic_swap_##name(ptr, val)); \ } #define _ASAN_ATOMIC_FUNC_CMPSET(name, type) \ int kasan_atomic_cmpset_##name(volatile type *ptr, type oval, \ type nval) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ return (atomic_cmpset_##name(ptr, oval, nval)); \ } #define ASAN_ATOMIC_FUNC_CMPSET(name, type) \ _ASAN_ATOMIC_FUNC_CMPSET(name, type) \ _ASAN_ATOMIC_FUNC_CMPSET(acq_##name, type) \ _ASAN_ATOMIC_FUNC_CMPSET(rel_##name, type) #define _ASAN_ATOMIC_FUNC_FCMPSET(name, type) \ int kasan_atomic_fcmpset_##name(volatile type *ptr, type *oval, \ type nval) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ return (atomic_fcmpset_##name(ptr, oval, nval)); \ } #define ASAN_ATOMIC_FUNC_FCMPSET(name, type) \ _ASAN_ATOMIC_FUNC_FCMPSET(name, type) \ _ASAN_ATOMIC_FUNC_FCMPSET(acq_##name, type) \ _ASAN_ATOMIC_FUNC_FCMPSET(rel_##name, type) #define ASAN_ATOMIC_FUNC_THREAD_FENCE(name) \ void kasan_atomic_thread_fence_##name(void) \ { \ atomic_thread_fence_##name(); \ } #define _ASAN_ATOMIC_FUNC_LOAD(name, type) \ type kasan_atomic_load_##name(volatile type *ptr) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ return (atomic_load_##name(ptr)); \ } #define ASAN_ATOMIC_FUNC_LOAD(name, type) \ 
_ASAN_ATOMIC_FUNC_LOAD(name, type) \ _ASAN_ATOMIC_FUNC_LOAD(acq_##name, type) #define _ASAN_ATOMIC_FUNC_STORE(name, type) \ void kasan_atomic_store_##name(volatile type *ptr, type val) \ { \ kasan_shadow_check((uintptr_t)ptr, sizeof(type), true, \ __RET_ADDR); \ atomic_store_##name(ptr, val); \ } #define ASAN_ATOMIC_FUNC_STORE(name, type) \ _ASAN_ATOMIC_FUNC_STORE(name, type) \ _ASAN_ATOMIC_FUNC_STORE(rel_##name, type) ASAN_ATOMIC_FUNC_ADD(8, uint8_t); ASAN_ATOMIC_FUNC_ADD(16, uint16_t); ASAN_ATOMIC_FUNC_ADD(32, uint32_t); ASAN_ATOMIC_FUNC_ADD(64, uint64_t); ASAN_ATOMIC_FUNC_ADD(int, u_int); ASAN_ATOMIC_FUNC_ADD(long, u_long); ASAN_ATOMIC_FUNC_ADD(ptr, uintptr_t); ASAN_ATOMIC_FUNC_SUBTRACT(8, uint8_t); ASAN_ATOMIC_FUNC_SUBTRACT(16, uint16_t); ASAN_ATOMIC_FUNC_SUBTRACT(32, uint32_t); ASAN_ATOMIC_FUNC_SUBTRACT(64, uint64_t); ASAN_ATOMIC_FUNC_SUBTRACT(int, u_int); ASAN_ATOMIC_FUNC_SUBTRACT(long, u_long); ASAN_ATOMIC_FUNC_SUBTRACT(ptr, uintptr_t); ASAN_ATOMIC_FUNC_SET(8, uint8_t); ASAN_ATOMIC_FUNC_SET(16, uint16_t); ASAN_ATOMIC_FUNC_SET(32, uint32_t); ASAN_ATOMIC_FUNC_SET(64, uint64_t); ASAN_ATOMIC_FUNC_SET(int, u_int); ASAN_ATOMIC_FUNC_SET(long, u_long); ASAN_ATOMIC_FUNC_SET(ptr, uintptr_t); ASAN_ATOMIC_FUNC_CLEAR(8, uint8_t); ASAN_ATOMIC_FUNC_CLEAR(16, uint16_t); ASAN_ATOMIC_FUNC_CLEAR(32, uint32_t); ASAN_ATOMIC_FUNC_CLEAR(64, uint64_t); ASAN_ATOMIC_FUNC_CLEAR(int, u_int); ASAN_ATOMIC_FUNC_CLEAR(long, u_long); ASAN_ATOMIC_FUNC_CLEAR(ptr, uintptr_t); ASAN_ATOMIC_FUNC_FETCHADD(32, uint32_t); ASAN_ATOMIC_FUNC_FETCHADD(64, uint64_t); ASAN_ATOMIC_FUNC_FETCHADD(int, u_int); ASAN_ATOMIC_FUNC_FETCHADD(long, u_long); ASAN_ATOMIC_FUNC_READANDCLEAR(32, uint32_t); ASAN_ATOMIC_FUNC_READANDCLEAR(64, uint64_t); ASAN_ATOMIC_FUNC_READANDCLEAR(int, u_int); ASAN_ATOMIC_FUNC_READANDCLEAR(long, u_long); ASAN_ATOMIC_FUNC_READANDCLEAR(ptr, uintptr_t); ASAN_ATOMIC_FUNC_TESTANDCLEAR(32, uint32_t); ASAN_ATOMIC_FUNC_TESTANDCLEAR(64, uint64_t); ASAN_ATOMIC_FUNC_TESTANDCLEAR(int, u_int); 
ASAN_ATOMIC_FUNC_TESTANDCLEAR(long, u_long); ASAN_ATOMIC_FUNC_TESTANDSET(32, uint32_t); ASAN_ATOMIC_FUNC_TESTANDSET(64, uint64_t); ASAN_ATOMIC_FUNC_TESTANDSET(int, u_int); ASAN_ATOMIC_FUNC_TESTANDSET(long, u_long); ASAN_ATOMIC_FUNC_SWAP(32, uint32_t); ASAN_ATOMIC_FUNC_SWAP(64, uint64_t); ASAN_ATOMIC_FUNC_SWAP(int, u_int); ASAN_ATOMIC_FUNC_SWAP(long, u_long); ASAN_ATOMIC_FUNC_SWAP(ptr, uintptr_t); ASAN_ATOMIC_FUNC_CMPSET(8, uint8_t); ASAN_ATOMIC_FUNC_CMPSET(16, uint16_t); ASAN_ATOMIC_FUNC_CMPSET(32, uint32_t); ASAN_ATOMIC_FUNC_CMPSET(64, uint64_t); ASAN_ATOMIC_FUNC_CMPSET(int, u_int); ASAN_ATOMIC_FUNC_CMPSET(long, u_long); ASAN_ATOMIC_FUNC_CMPSET(ptr, uintptr_t); ASAN_ATOMIC_FUNC_FCMPSET(8, uint8_t); ASAN_ATOMIC_FUNC_FCMPSET(16, uint16_t); ASAN_ATOMIC_FUNC_FCMPSET(32, uint32_t); ASAN_ATOMIC_FUNC_FCMPSET(64, uint64_t); ASAN_ATOMIC_FUNC_FCMPSET(int, u_int); ASAN_ATOMIC_FUNC_FCMPSET(long, u_long); ASAN_ATOMIC_FUNC_FCMPSET(ptr, uintptr_t); ASAN_ATOMIC_FUNC_LOAD(8, uint8_t); ASAN_ATOMIC_FUNC_LOAD(16, uint16_t); ASAN_ATOMIC_FUNC_LOAD(32, uint32_t); ASAN_ATOMIC_FUNC_LOAD(64, uint64_t); ASAN_ATOMIC_FUNC_LOAD(char, u_char); ASAN_ATOMIC_FUNC_LOAD(short, u_short); ASAN_ATOMIC_FUNC_LOAD(int, u_int); ASAN_ATOMIC_FUNC_LOAD(long, u_long); ASAN_ATOMIC_FUNC_LOAD(ptr, uintptr_t); ASAN_ATOMIC_FUNC_STORE(8, uint8_t); ASAN_ATOMIC_FUNC_STORE(16, uint16_t); ASAN_ATOMIC_FUNC_STORE(32, uint32_t); ASAN_ATOMIC_FUNC_STORE(64, uint64_t); ASAN_ATOMIC_FUNC_STORE(char, u_char); ASAN_ATOMIC_FUNC_STORE(short, u_short); ASAN_ATOMIC_FUNC_STORE(int, u_int); ASAN_ATOMIC_FUNC_STORE(long, u_long); ASAN_ATOMIC_FUNC_STORE(ptr, uintptr_t); ASAN_ATOMIC_FUNC_THREAD_FENCE(acq); ASAN_ATOMIC_FUNC_THREAD_FENCE(rel); ASAN_ATOMIC_FUNC_THREAD_FENCE(acq_rel); ASAN_ATOMIC_FUNC_THREAD_FENCE(seq_cst); void kasan_atomic_interrupt_fence(void) { } /* -------------------------------------------------------------------------- */ #include #include #include int kasan_bus_space_map(bus_space_tag_t tag, bus_addr_t hnd, bus_size_t 
size, int flags, bus_space_handle_t *handlep) { return (bus_space_map(tag, hnd, size, flags, handlep)); } void kasan_bus_space_unmap(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t size) { bus_space_unmap(tag, hnd, size); } int kasan_bus_space_subregion(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t offset, bus_size_t size, bus_space_handle_t *handlep) { return (bus_space_subregion(tag, hnd, offset, size, handlep)); } void kasan_bus_space_free(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t size) { bus_space_free(tag, hnd, size); } void kasan_bus_space_barrier(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t offset, bus_size_t size, int flags) { bus_space_barrier(tag, hnd, offset, size, flags); } #define ASAN_BUS_READ_FUNC(func, width, type) \ type kasan_bus_space_read##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset) \ { \ return (bus_space_read##func##_##width(tag, hnd, \ offset)); \ } \ #define ASAN_BUS_READ_PTR_FUNC(func, width, type) \ void kasan_bus_space_read_##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t size, type *buf, \ bus_size_t count) \ { \ kasan_shadow_check((uintptr_t)buf, sizeof(type) * count,\ false, __RET_ADDR); \ bus_space_read_##func##_##width(tag, hnd, size, buf, \ count); \ } ASAN_BUS_READ_FUNC(, 1, uint8_t) ASAN_BUS_READ_FUNC(_stream, 1, uint8_t) ASAN_BUS_READ_PTR_FUNC(multi, 1, uint8_t) ASAN_BUS_READ_PTR_FUNC(multi_stream, 1, uint8_t) ASAN_BUS_READ_PTR_FUNC(region, 1, uint8_t) ASAN_BUS_READ_PTR_FUNC(region_stream, 1, uint8_t) ASAN_BUS_READ_FUNC(, 2, uint16_t) ASAN_BUS_READ_FUNC(_stream, 2, uint16_t) ASAN_BUS_READ_PTR_FUNC(multi, 2, uint16_t) ASAN_BUS_READ_PTR_FUNC(multi_stream, 2, uint16_t) ASAN_BUS_READ_PTR_FUNC(region, 2, uint16_t) ASAN_BUS_READ_PTR_FUNC(region_stream, 2, uint16_t) ASAN_BUS_READ_FUNC(, 4, uint32_t) ASAN_BUS_READ_FUNC(_stream, 4, uint32_t) ASAN_BUS_READ_PTR_FUNC(multi, 4, uint32_t) ASAN_BUS_READ_PTR_FUNC(multi_stream, 4, uint32_t) 
ASAN_BUS_READ_PTR_FUNC(region, 4, uint32_t) ASAN_BUS_READ_PTR_FUNC(region_stream, 4, uint32_t) ASAN_BUS_READ_FUNC(, 8, uint64_t) #define ASAN_BUS_WRITE_FUNC(func, width, type) \ void kasan_bus_space_write##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset, type value) \ { \ bus_space_write##func##_##width(tag, hnd, offset, value);\ } \ #define ASAN_BUS_WRITE_PTR_FUNC(func, width, type) \ void kasan_bus_space_write_##func##_##width(bus_space_tag_t tag,\ bus_space_handle_t hnd, bus_size_t size, const type *buf, \ bus_size_t count) \ { \ kasan_shadow_check((uintptr_t)buf, sizeof(type) * count,\ true, __RET_ADDR); \ bus_space_write_##func##_##width(tag, hnd, size, buf, \ count); \ } ASAN_BUS_WRITE_FUNC(, 1, uint8_t) ASAN_BUS_WRITE_FUNC(_stream, 1, uint8_t) ASAN_BUS_WRITE_PTR_FUNC(multi, 1, uint8_t) ASAN_BUS_WRITE_PTR_FUNC(multi_stream, 1, uint8_t) ASAN_BUS_WRITE_PTR_FUNC(region, 1, uint8_t) ASAN_BUS_WRITE_PTR_FUNC(region_stream, 1, uint8_t) ASAN_BUS_WRITE_FUNC(, 2, uint16_t) ASAN_BUS_WRITE_FUNC(_stream, 2, uint16_t) ASAN_BUS_WRITE_PTR_FUNC(multi, 2, uint16_t) ASAN_BUS_WRITE_PTR_FUNC(multi_stream, 2, uint16_t) ASAN_BUS_WRITE_PTR_FUNC(region, 2, uint16_t) ASAN_BUS_WRITE_PTR_FUNC(region_stream, 2, uint16_t) ASAN_BUS_WRITE_FUNC(, 4, uint32_t) ASAN_BUS_WRITE_FUNC(_stream, 4, uint32_t) ASAN_BUS_WRITE_PTR_FUNC(multi, 4, uint32_t) ASAN_BUS_WRITE_PTR_FUNC(multi_stream, 4, uint32_t) ASAN_BUS_WRITE_PTR_FUNC(region, 4, uint32_t) ASAN_BUS_WRITE_PTR_FUNC(region_stream, 4, uint32_t) ASAN_BUS_WRITE_FUNC(, 8, uint64_t) #define ASAN_BUS_SET_FUNC(func, width, type) \ void kasan_bus_space_set_##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset, type value, \ bus_size_t count) \ { \ bus_space_set_##func##_##width(tag, hnd, offset, value, \ count); \ } ASAN_BUS_SET_FUNC(multi, 1, uint8_t) ASAN_BUS_SET_FUNC(region, 1, uint8_t) ASAN_BUS_SET_FUNC(multi_stream, 1, uint8_t) ASAN_BUS_SET_FUNC(region_stream, 1, uint8_t) 
ASAN_BUS_SET_FUNC(multi, 2, uint16_t) ASAN_BUS_SET_FUNC(region, 2, uint16_t) ASAN_BUS_SET_FUNC(multi_stream, 2, uint16_t) ASAN_BUS_SET_FUNC(region_stream, 2, uint16_t) ASAN_BUS_SET_FUNC(multi, 4, uint32_t) ASAN_BUS_SET_FUNC(region, 4, uint32_t) ASAN_BUS_SET_FUNC(multi_stream, 4, uint32_t) ASAN_BUS_SET_FUNC(region_stream, 4, uint32_t) /* -------------------------------------------------------------------------- */ void __asan_register_globals(struct __asan_global *, size_t); void __asan_unregister_globals(struct __asan_global *, size_t); void __asan_register_globals(struct __asan_global *globals, size_t n) { size_t i; for (i = 0; i < n; i++) { kasan_mark(globals[i].beg, globals[i].size, globals[i].size_with_redzone, KASAN_GENERIC_REDZONE); } } void __asan_unregister_globals(struct __asan_global *globals, size_t n) { size_t i; for (i = 0; i < n; i++) { kasan_mark(globals[i].beg, globals[i].size_with_redzone, globals[i].size_with_redzone, 0); } } #define ASAN_LOAD_STORE(size) \ void __asan_load##size(unsigned long); \ void __asan_load##size(unsigned long addr) \ { \ kasan_shadow_check(addr, size, false, __RET_ADDR);\ } \ void __asan_load##size##_noabort(unsigned long); \ void __asan_load##size##_noabort(unsigned long addr) \ { \ kasan_shadow_check(addr, size, false, __RET_ADDR);\ } \ void __asan_store##size(unsigned long); \ void __asan_store##size(unsigned long addr) \ { \ kasan_shadow_check(addr, size, true, __RET_ADDR);\ } \ void __asan_store##size##_noabort(unsigned long); \ void __asan_store##size##_noabort(unsigned long addr) \ { \ kasan_shadow_check(addr, size, true, __RET_ADDR);\ } ASAN_LOAD_STORE(1); ASAN_LOAD_STORE(2); ASAN_LOAD_STORE(4); ASAN_LOAD_STORE(8); ASAN_LOAD_STORE(16); void __asan_loadN(unsigned long, size_t); void __asan_loadN_noabort(unsigned long, size_t); void __asan_storeN(unsigned long, size_t); void __asan_storeN_noabort(unsigned long, size_t); void __asan_handle_no_return(void); void __asan_loadN(unsigned long addr, size_t size) { 
kasan_shadow_check(addr, size, false, __RET_ADDR); } void __asan_loadN_noabort(unsigned long addr, size_t size) { kasan_shadow_check(addr, size, false, __RET_ADDR); } void __asan_storeN(unsigned long addr, size_t size) { kasan_shadow_check(addr, size, true, __RET_ADDR); } void __asan_storeN_noabort(unsigned long addr, size_t size) { kasan_shadow_check(addr, size, true, __RET_ADDR); } void __asan_handle_no_return(void) { /* nothing */ } #define ASAN_SET_SHADOW(byte) \ void __asan_set_shadow_##byte(void *, size_t); \ void __asan_set_shadow_##byte(void *addr, size_t size) \ { \ __builtin_memset((void *)addr, 0x##byte, size); \ } ASAN_SET_SHADOW(00); ASAN_SET_SHADOW(f1); ASAN_SET_SHADOW(f2); ASAN_SET_SHADOW(f3); ASAN_SET_SHADOW(f5); ASAN_SET_SHADOW(f8); void __asan_poison_stack_memory(const void *, size_t); void __asan_unpoison_stack_memory(const void *, size_t); void __asan_poison_stack_memory(const void *addr, size_t size) { size = roundup(size, KASAN_SHADOW_SCALE); kasan_shadow_Nbyte_fill(addr, size, KASAN_USE_AFTER_SCOPE); } void __asan_unpoison_stack_memory(const void *addr, size_t size) { kasan_shadow_Nbyte_markvalid(addr, size); } void __asan_alloca_poison(const void *, size_t); void __asan_allocas_unpoison(const void *, const void *); void __asan_alloca_poison(const void *addr, size_t size) { const void *l, *r; KASSERT((vm_offset_t)addr % KASAN_ALLOCA_SCALE_SIZE == 0, ("%s: invalid address %p", __func__, addr)); l = (const uint8_t *)addr - KASAN_ALLOCA_SCALE_SIZE; r = (const uint8_t *)addr + roundup(size, KASAN_ALLOCA_SCALE_SIZE); kasan_shadow_Nbyte_fill(l, KASAN_ALLOCA_SCALE_SIZE, KASAN_STACK_LEFT); kasan_mark(addr, size, roundup(size, KASAN_ALLOCA_SCALE_SIZE), KASAN_STACK_MID); kasan_shadow_Nbyte_fill(r, KASAN_ALLOCA_SCALE_SIZE, KASAN_STACK_RIGHT); } void __asan_allocas_unpoison(const void *stkbegin, const void *stkend) { size_t size; if (__predict_false(!stkbegin)) return; if (__predict_false((uintptr_t)stkbegin > (uintptr_t)stkend)) return; size = 
(uintptr_t)stkend - (uintptr_t)stkbegin; kasan_shadow_Nbyte_fill(stkbegin, size, 0); } void __asan_poison_memory_region(const void *addr, size_t size); void __asan_unpoison_memory_region(const void *addr, size_t size); void __asan_poison_memory_region(const void *addr, size_t size) { } void __asan_unpoison_memory_region(const void *addr, size_t size) { } diff --git a/sys/sys/systm.h b/sys/sys/systm.h index 671b5c2b8d38..da20492966cd 100644 --- a/sys/sys/systm.h +++ b/sys/sys/systm.h @@ -1,644 +1,675 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1988, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)systm.h 8.7 (Berkeley) 3/29/95 * $FreeBSD$ */ #ifndef _SYS_SYSTM_H_ #define _SYS_SYSTM_H_ #include #include #include #include #include #include /* for people using printf mainly */ __NULLABILITY_PRAGMA_PUSH #ifdef _KERNEL extern int cold; /* nonzero if we are doing a cold boot */ extern int suspend_blocked; /* block suspend due to pending shutdown */ extern int rebooting; /* kern_reboot() has been called. */ extern const char *panicstr; /* panic message */ extern bool panicked; #define KERNEL_PANICKED() __predict_false(panicked) extern char version[]; /* system version */ extern char compiler_version[]; /* compiler version */ extern char copyright[]; /* system copyright */ extern int kstack_pages; /* number of kernel stack pages */ extern u_long pagesizes[]; /* supported page sizes */ extern long physmem; /* physical memory */ extern long realmem; /* 'real' memory */ extern char *rootdevnames[2]; /* names of possible root devices */ extern int boothowto; /* reboot flags, from console subsystem */ extern int bootverbose; /* nonzero to print verbose messages */ extern int maxusers; /* system tune hint */ extern int ngroups_max; /* max # of supplemental groups */ extern int vm_guest; /* Running as virtual machine guest? */ extern u_long maxphys; /* max raw I/O transfer size */ /* * Detected virtual machine guest types. The intention is to expand * and/or add to the VM_GUEST_VM type if specific VM functionality is * ever implemented (e.g. 
 * vendor-specific paravirtualization features).
 * Keep in sync with vm_guest_sysctl_names[].
 */
enum VM_GUEST {
	VM_GUEST_NO = 0,	/* not running as a virtual machine guest */
	VM_GUEST_VM,
	VM_GUEST_XEN,
	VM_GUEST_HV,
	VM_GUEST_VMWARE,
	VM_GUEST_KVM,
	VM_GUEST_BHYVE,
	VM_GUEST_VBOX,
	VM_GUEST_PARALLELS,
	VM_LAST			/* sentinel: one past the last guest type */
};

#ifdef INVARIANTS		/* The option is always available */
/*
 * Vnode assertion.  On failure, dump the vnode's state via vn_printf()
 * and then panic with the caller-supplied, parenthesized message.
 */
#define	VNASSERT(exp, vp, msg) do {					\
	if (__predict_false(!(exp))) {					\
		vn_printf(vp, "VNASSERT failed: %s not true at %s:%d (%s)\n",\
		   #exp, __FILE__, __LINE__, __func__);			\
		kassert_panic msg;					\
	}								\
} while (0)
/* VNASSERT with an auto-generated message built from the condition text. */
#define	VNPASS(exp, vp)	do {						\
	const char *_exp = #exp;					\
	VNASSERT(exp, vp, ("condition %s not met at %s:%d (%s)",	\
	    _exp, __FILE__, __LINE__, __func__));			\
} while (0)
#define	__assert_unreachable() \
	panic("executing segment marked as unreachable at %s:%d (%s)\n", \
	    __FILE__, __LINE__, __func__)
#else
/* Without INVARIANTS the vnode assertions compile away entirely. */
#define	VNASSERT(exp, vp, msg) do { \
} while (0)
#define	VNPASS(exp, vp)	do { \
} while (0)
#define	__assert_unreachable()	__unreachable()
#endif

#ifndef CTASSERT	/* Allow lint to override */
#define	CTASSERT(x)	_Static_assert(x, "compile-time assertion failed")
#endif
#endif /* KERNEL */

/*
 * These functions need to be declared before the KASSERT macro is invoked in
 * !KASSERT_PANIC_OPTIONAL builds, so their declarations are sort of out of
 * place compared to other function definitions in this header.  On the other
 * hand, this header is a bit disorganized anyway.
 */
void	panic(const char *, ...) __dead2 __printflike(1, 2);
void	vpanic(const char *, __va_list) __dead2 __printflike(1, 0);

#if defined(_STANDALONE)
struct ucred;
/*
 * Until we have more experience with KASSERTS that are called
 * from the boot loader, they are off. The bootloader does this
 * a little differently than the kernel (we just call printf atm).
 * we avoid most of the common functions in the boot loader, so
 * declare printf() here too.
 */
int	printf(const char *, ...) __printflike(1, 2);
# define kassert_panic printf
#else /* !_STANDALONE */
# if defined(WITNESS) || defined(INVARIANT_SUPPORT)
# ifdef KASSERT_PANIC_OPTIONAL
void	kassert_panic(const char *fmt, ...) __printflike(1, 2);
# else
# define kassert_panic	panic
# endif /* KASSERT_PANIC_OPTIONAL */
# endif /* defined(WITNESS) || defined(INVARIANT_SUPPORT) */
#endif /* _STANDALONE */

#if defined(INVARIANTS) || defined(_STANDALONE)
/* Kernel assertion: calls kassert_panic when "exp" evaluates false. */
#define	KASSERT(exp,msg) do {						\
	if (__predict_false(!(exp)))					\
		kassert_panic msg;					\
} while (0)
#else /* !INVARIANTS && !_STANDALONE */
#define	KASSERT(exp,msg) do { \
} while (0)
#endif /* INVARIANTS || _STANDALONE */

/*
 * Helpful macros for quickly coming up with assertions with informative
 * panic messages.
 */
#define MPASS(ex)		MPASS4(ex, #ex, __FILE__, __LINE__)
#define MPASS2(ex, what)	MPASS4(ex, what, __FILE__, __LINE__)
#define MPASS3(ex, file, line)	MPASS4(ex, #ex, file, line)
#define MPASS4(ex, what, file, line)					\
	KASSERT((ex), ("Assertion %s failed at %s:%d", what, file, line))

/*
 * Align variables.
 */
#define	__read_mostly		__section(".data.read_mostly")
#define	__read_frequently	__section(".data.read_frequently")
#define	__exclusive_cache_line	__aligned(CACHE_LINE_SIZE) \
				    __section(".data.exclusive_cache_line")
#ifdef _KERNEL
/*
 * NOTE(review): the #include targets below were lost in extraction; the
 * trailing comments record what each header supplied -- confirm the header
 * names against upstream sys/sys/systm.h.
 */
#include	/* MAXCPU */
#include	/* curthread */
#include

/*
 * Assert that a pointer can be loaded from memory atomically.
 *
 * This assertion enforces stronger alignment than necessary.  For example,
 * on some architectures, atomicity for unaligned loads will depend on
 * whether or not the load spans multiple cache lines.
 */
#define	ASSERT_ATOMIC_LOAD_PTR(var, msg)				\
	KASSERT(sizeof(var) == sizeof(void *) &&			\
	    ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0, msg)

/*
 * Assert that a thread is in critical(9) section.
 */
#define	CRITICAL_ASSERT(td)						\
	KASSERT((td)->td_critnest >= 1, ("Not in critical section"))

/*
 * If we have already panic'd and this is the thread that called
 * panic(), then don't block on any mutexes but silently succeed.
 * Otherwise, the kernel will deadlock since the scheduler isn't
 * going to run the thread that holds any lock we need.
 */
#define	SCHEDULER_STOPPED_TD(td)  ({					\
	MPASS((td) == curthread);					\
	__predict_false((td)->td_stopsched);				\
})
#define	SCHEDULER_STOPPED() SCHEDULER_STOPPED_TD(curthread)

extern int osreldate;

extern const void *zero_region;	/* address space maps to a zeroed page */

extern int unmapped_buf_allowed;

#ifdef __LP64__
#define	IOSIZE_MAX		iosize_max()
#define	DEVFS_IOSIZE_MAX	devfs_iosize_max()
#else
#define	IOSIZE_MAX		SSIZE_MAX
#define	DEVFS_IOSIZE_MAX	SSIZE_MAX
#endif

/*
 * General function declarations.
 */

struct inpcb;
struct lock_object;
struct malloc_type;
struct mtx;
struct proc;
struct socket;
struct thread;
struct tty;
struct ucred;
struct uio;
struct _jmp_buf;
struct trapframe;
struct eventtimer;

int	setjmp(struct _jmp_buf *) __returns_twice;
void	longjmp(struct _jmp_buf *, int) __dead2;
int	dumpstatus(vm_offset_t addr, off_t count);
int	nullop(void);
int	eopnotsupp(void);
int	ureadc(int, struct uio *);
void	hashdestroy(void *, struct malloc_type *, u_long);
void	*hashinit(int count, struct malloc_type *type, u_long *hashmask);
void	*hashinit_flags(int count, struct malloc_type *type,
    u_long *hashmask, int flags);
#define	HASH_NOWAIT	0x00000001
#define	HASH_WAITOK	0x00000002
void	*phashinit(int count, struct malloc_type *type, u_long *nentries);
void	*phashinit_flags(int count, struct malloc_type *type,
    u_long *nentries, int flags);
void	g_waitidle(void);

void	cpu_flush_dcache(void *, size_t);
void	cpu_rootconf(void);
void	critical_enter_KBI(void);
void	critical_exit_KBI(void);
void	critical_exit_preempt(void);
void	init_param1(void);
void	init_param2(long physpages);
void	init_static_kenv(char *, size_t);
void	tablefull(const char *);

/*
 * Allocate per-thread "current" state in the linuxkpi
 */
extern int (*lkpi_alloc_current)(struct thread *, int);
int linux_alloc_current_noop(struct thread *, int);

#if defined(KLD_MODULE) || defined(KTR_CRITICAL) || !defined(_KERNEL) || defined(GENOFFSET)
#define critical_enter() critical_enter_KBI()
#define critical_exit() critical_exit_KBI()
#else
/*
 * Enter a critical section: bump td_critnest so the thread stays on this
 * CPU until the matching critical_exit().
 */
static __inline void
critical_enter(void)
{
	struct thread_lite *td;

	td = (struct thread_lite *)curthread;
	td->td_critnest++;
	atomic_interrupt_fence();
}

/*
 * Leave a critical section.  If a preemption was deferred while inside
 * (td_owepreempt), let it happen now via critical_exit_preempt().
 */
static __inline void
critical_exit(void)
{
	struct thread_lite *td;

	td = (struct thread_lite *)curthread;
	KASSERT(td->td_critnest != 0,
	    ("critical_exit: td_critnest == 0"));
	atomic_interrupt_fence();
	td->td_critnest--;
	atomic_interrupt_fence();
	if (__predict_false(td->td_owepreempt))
		critical_exit_preempt();
}
#endif

#ifdef EARLY_PRINTF
typedef void early_putc_t(int ch);
extern early_putc_t *early_putc;
#endif
int	kvprintf(char const *, void (*)(int, void*), void *, int,
	    __va_list) __printflike(1, 0);
void	log(int, const char *, ...) __printflike(2, 3);
void	log_console(struct uio *);
void	vlog(int, const char *, __va_list) __printflike(2, 0);
int	asprintf(char **ret, struct malloc_type *mtp, const char *format,
	    ...) __printflike(3, 4);
int	printf(const char *, ...) __printflike(1, 2);
int	snprintf(char *, size_t, const char *, ...) __printflike(3, 4);
int	sprintf(char *buf, const char *, ...) __printflike(2, 3);
int	uprintf(const char *, ...) __printflike(1, 2);
int	vprintf(const char *, __va_list) __printflike(1, 0);
int	vasprintf(char **ret, struct malloc_type *mtp, const char *format,
	    __va_list ap) __printflike(3, 0);
int	vsnprintf(char *, size_t, const char *, __va_list) __printflike(3, 0);
int	vsnrprintf(char *, size_t, int, const char *, __va_list) __printflike(4, 0);
int	vsprintf(char *buf, const char *, __va_list) __printflike(2, 0);
int	sscanf(const char *, char const * _Nonnull, ...)
__scanflike(2, 3); int vsscanf(const char * _Nonnull, char const * _Nonnull, __va_list) __scanflike(2, 0); long strtol(const char *, char **, int); u_long strtoul(const char *, char **, int); quad_t strtoq(const char *, char **, int); u_quad_t strtouq(const char *, char **, int); void tprintf(struct proc *p, int pri, const char *, ...) __printflike(3, 4); void vtprintf(struct proc *, int, const char *, __va_list) __printflike(3, 0); void hexdump(const void *ptr, int length, const char *hdr, int flags); #define HD_COLUMN_MASK 0xff #define HD_DELIM_MASK 0xff00 #define HD_OMIT_COUNT (1 << 16) #define HD_OMIT_HEX (1 << 17) #define HD_OMIT_CHARS (1 << 18) #define ovbcopy(f, t, l) bcopy((f), (t), (l)) void bcopy(const void * _Nonnull from, void * _Nonnull to, size_t len); void bzero(void * _Nonnull buf, size_t len); void explicit_bzero(void * _Nonnull, size_t); int bcmp(const void *b1, const void *b2, size_t len); void *memset(void * _Nonnull buf, int c, size_t len); void *memcpy(void * _Nonnull to, const void * _Nonnull from, size_t len); void *memmove(void * _Nonnull dest, const void * _Nonnull src, size_t n); int memcmp(const void *b1, const void *b2, size_t len); #ifdef SAN_NEEDS_INTERCEPTORS #define SAN_INTERCEPTOR(func) \ __CONCAT(SAN_INTERCEPTOR_PREFIX, __CONCAT(_, func)) void *SAN_INTERCEPTOR(memset)(void *, int, size_t); void *SAN_INTERCEPTOR(memcpy)(void *, const void *, size_t); void *SAN_INTERCEPTOR(memmove)(void *, const void *, size_t); int SAN_INTERCEPTOR(memcmp)(const void *, const void *, size_t); #ifndef SAN_RUNTIME #define bcopy(from, to, len) SAN_INTERCEPTOR(memmove)((to), (from), (len)) #define bzero(buf, len) SAN_INTERCEPTOR(memset)((buf), 0, (len)) #define bcmp(b1, b2, len) SAN_INTERCEPTOR(memcmp)((b1), (b2), (len)) #define memset(buf, c, len) SAN_INTERCEPTOR(memset)((buf), (c), (len)) #define memcpy(to, from, len) SAN_INTERCEPTOR(memcpy)((to), (from), (len)) #define memmove(dest, src, n) SAN_INTERCEPTOR(memmove)((dest), (src), (n)) #define 
memcmp(b1, b2, len) SAN_INTERCEPTOR(memcmp)((b1), (b2), (len)) #endif /* !SAN_RUNTIME */ #else /* !SAN_NEEDS_INTERCEPTORS */ #define bcopy(from, to, len) __builtin_memmove((to), (from), (len)) #define bzero(buf, len) __builtin_memset((buf), 0, (len)) #define bcmp(b1, b2, len) __builtin_memcmp((b1), (b2), (len)) #define memset(buf, c, len) __builtin_memset((buf), (c), (len)) #define memcpy(to, from, len) __builtin_memcpy((to), (from), (len)) #define memmove(dest, src, n) __builtin_memmove((dest), (src), (n)) #define memcmp(b1, b2, len) __builtin_memcmp((b1), (b2), (len)) #endif /* SAN_NEEDS_INTERCEPTORS */ void *memset_early(void * _Nonnull buf, int c, size_t len); #define bzero_early(buf, len) memset_early((buf), 0, (len)) void *memcpy_early(void * _Nonnull to, const void * _Nonnull from, size_t len); void *memmove_early(void * _Nonnull dest, const void * _Nonnull src, size_t n); #define bcopy_early(from, to, len) memmove_early((to), (from), (len)) #define copystr(src, dst, len, outlen) ({ \ size_t __r, __len, *__outlen; \ \ __len = (len); \ __outlen = (outlen); \ __r = strlcpy((dst), (src), __len); \ if (__outlen != NULL) \ *__outlen = ((__r >= __len) ? __len : __r + 1); \ ((__r >= __len) ? 
ENAMETOOLONG : 0); \ }) int copyinstr(const void * __restrict udaddr, void * _Nonnull __restrict kaddr, size_t len, size_t * __restrict lencopied); int copyin(const void * __restrict udaddr, void * _Nonnull __restrict kaddr, size_t len); int copyin_nofault(const void * __restrict udaddr, void * _Nonnull __restrict kaddr, size_t len); int copyout(const void * _Nonnull __restrict kaddr, void * __restrict udaddr, size_t len); int copyout_nofault(const void * _Nonnull __restrict kaddr, void * __restrict udaddr, size_t len); #ifdef SAN_NEEDS_INTERCEPTORS int SAN_INTERCEPTOR(copyin)(const void *, void *, size_t); int SAN_INTERCEPTOR(copyinstr)(const void *, void *, size_t, size_t *); int SAN_INTERCEPTOR(copyout)(const void *, void *, size_t); #ifndef SAN_RUNTIME #define copyin(u, k, l) SAN_INTERCEPTOR(copyin)((u), (k), (l)) #define copyinstr(u, k, l, lc) SAN_INTERCEPTOR(copyinstr)((u), (k), (l), (lc)) #define copyout(k, u, l) SAN_INTERCEPTOR(copyout)((k), (u), (l)) #endif /* !SAN_RUNTIME */ #endif /* SAN_NEEDS_INTERCEPTORS */ int fubyte(volatile const void *base); long fuword(volatile const void *base); int fuword16(volatile const void *base); int32_t fuword32(volatile const void *base); int64_t fuword64(volatile const void *base); int fueword(volatile const void *base, long *val); int fueword32(volatile const void *base, int32_t *val); int fueword64(volatile const void *base, int64_t *val); int subyte(volatile void *base, int byte); int suword(volatile void *base, long word); int suword16(volatile void *base, int word); int suword32(volatile void *base, int32_t word); int suword64(volatile void *base, int64_t word); uint32_t casuword32(volatile uint32_t *base, uint32_t oldval, uint32_t newval); u_long casuword(volatile u_long *p, u_long oldval, u_long newval); int casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp, uint32_t newval); int casueword(volatile u_long *p, u_long oldval, u_long *oldvalp, u_long newval); +#if defined(SAN_NEEDS_INTERCEPTORS) 
&& !defined(KCSAN) +int SAN_INTERCEPTOR(fubyte)(volatile const void *base); +int SAN_INTERCEPTOR(fuword16)(volatile const void *base); +int SAN_INTERCEPTOR(fueword)(volatile const void *base, long *val); +int SAN_INTERCEPTOR(fueword32)(volatile const void *base, int32_t *val); +int SAN_INTERCEPTOR(fueword64)(volatile const void *base, int64_t *val); +int SAN_INTERCEPTOR(subyte)(volatile void *base, int byte); +int SAN_INTERCEPTOR(suword)(volatile void *base, long word); +int SAN_INTERCEPTOR(suword16)(volatile void *base, int word); +int SAN_INTERCEPTOR(suword32)(volatile void *base, int32_t word); +int SAN_INTERCEPTOR(suword64)(volatile void *base, int64_t word); +int SAN_INTERCEPTOR(casueword32)(volatile uint32_t *base, uint32_t oldval, + uint32_t *oldvalp, uint32_t newval); +int SAN_INTERCEPTOR(casueword)(volatile u_long *p, u_long oldval, + u_long *oldvalp, u_long newval); +#ifndef SAN_RUNTIME +#define fubyte(b) SAN_INTERCEPTOR(fubyte)((b)) +#define fuword16(b) SAN_INTERCEPTOR(fuword16)((b)) +#define fueword(b, v) SAN_INTERCEPTOR(fueword)((b), (v)) +#define fueword32(b, v) SAN_INTERCEPTOR(fueword32)((b), (v)) +#define fueword64(b, v) SAN_INTERCEPTOR(fueword64)((b), (v)) +#define subyte(b, w) SAN_INTERCEPTOR(subyte)((b), (w)) +#define suword(b, w) SAN_INTERCEPTOR(suword)((b), (w)) +#define suword16(b, w) SAN_INTERCEPTOR(suword16)((b), (w)) +#define suword32(b, w) SAN_INTERCEPTOR(suword32)((b), (w)) +#define suword64(b, w) SAN_INTERCEPTOR(suword64)((b), (w)) +#define casueword32(b, o, p, n) SAN_INTERCEPTOR(casueword32)((b), (o), (p), (n)) +#define casueword(b, o, p, n) SAN_INTERCEPTOR(casueword)((b), (o), (p), (n)) +#endif /* !SAN_RUNTIME */ +#endif /* SAN_NEEDS_INTERCEPTORS && !KCSAN */ + void realitexpire(void *); int sysbeep(int hertz, int period); void hardclock(int cnt, int usermode); void hardclock_sync(int cpu); void softclock(void *); void statclock(int cnt, int usermode); void profclock(int cnt, int usermode, uintfptr_t pc); int hardclockintr(void); void 
startprofclock(struct proc *); void stopprofclock(struct proc *); void cpu_startprofclock(void); void cpu_stopprofclock(void); void suspendclock(void); void resumeclock(void); sbintime_t cpu_idleclock(void); void cpu_activeclock(void); void cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt); void cpu_et_frequency(struct eventtimer *et, uint64_t newfreq); extern int cpu_disable_c2_sleep; extern int cpu_disable_c3_sleep; char *kern_getenv(const char *name); void freeenv(char *env); int getenv_int(const char *name, int *data); int getenv_uint(const char *name, unsigned int *data); int getenv_long(const char *name, long *data); int getenv_ulong(const char *name, unsigned long *data); int getenv_string(const char *name, char *data, int size); int getenv_int64(const char *name, int64_t *data); int getenv_uint64(const char *name, uint64_t *data); int getenv_quad(const char *name, quad_t *data); int getenv_bool(const char *name, bool *data); bool getenv_is_true(const char *name); bool getenv_is_false(const char *name); int kern_setenv(const char *name, const char *value); int kern_unsetenv(const char *name); int testenv(const char *name); int getenv_array(const char *name, void *data, int size, int *psize, int type_size, bool allow_signed); #define GETENV_UNSIGNED false /* negative numbers not allowed */ #define GETENV_SIGNED true /* negative numbers allowed */ typedef uint64_t (cpu_tick_f)(void); void set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var); extern cpu_tick_f *cpu_ticks; uint64_t cpu_tickrate(void); uint64_t cputick2usec(uint64_t tick); #include /* Initialize the world */ void consinit(void); void cpu_initclocks(void); void cpu_initclocks_bsp(void); void cpu_initclocks_ap(void); void usrinfoinit(void); /* Finalize the world */ void kern_reboot(int) __dead2; void shutdown_nice(int); /* Stubs for obsolete functions that used to be for interrupt management */ static __inline intrmask_t splhigh(void) { return 0; } static __inline intrmask_t 
splimp(void) { return 0; } static __inline intrmask_t splnet(void) { return 0; } static __inline intrmask_t spltty(void) { return 0; } static __inline void splx(intrmask_t ipl __unused) { return; } /* * Common `proc' functions are declared here so that proc.h can be included * less often. */ int _sleep(const void * _Nonnull chan, struct lock_object *lock, int pri, const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags); #define msleep(chan, mtx, pri, wmesg, timo) \ _sleep((chan), &(mtx)->lock_object, (pri), (wmesg), \ tick_sbt * (timo), 0, C_HARDCLOCK) #define msleep_sbt(chan, mtx, pri, wmesg, bt, pr, flags) \ _sleep((chan), &(mtx)->lock_object, (pri), (wmesg), (bt), (pr), \ (flags)) int msleep_spin_sbt(const void * _Nonnull chan, struct mtx *mtx, const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags); #define msleep_spin(chan, mtx, wmesg, timo) \ msleep_spin_sbt((chan), (mtx), (wmesg), tick_sbt * (timo), \ 0, C_HARDCLOCK) int pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags); #define pause(wmesg, timo) \ pause_sbt((wmesg), tick_sbt * (timo), 0, C_HARDCLOCK) #define pause_sig(wmesg, timo) \ pause_sbt((wmesg), tick_sbt * (timo), 0, C_HARDCLOCK | C_CATCH) #define tsleep(chan, pri, wmesg, timo) \ _sleep((chan), NULL, (pri), (wmesg), tick_sbt * (timo), \ 0, C_HARDCLOCK) #define tsleep_sbt(chan, pri, wmesg, bt, pr, flags) \ _sleep((chan), NULL, (pri), (wmesg), (bt), (pr), (flags)) void wakeup(const void *chan); void wakeup_one(const void *chan); void wakeup_any(const void *chan); /* * Common `struct cdev *' stuff are declared here to avoid #include poisoning */ struct cdev; dev_t dev2udev(struct cdev *x); const char *devtoname(struct cdev *cdev); #ifdef __LP64__ size_t devfs_iosize_max(void); size_t iosize_max(void); #endif int poll_no_poll(int events); /* XXX: Should be void nanodelay(u_int nsec); */ void DELAY(int usec); /* Root mount holdback API */ struct root_hold_token { int flags; const char *who; TAILQ_ENTRY(root_hold_token) list; 
}; struct root_hold_token *root_mount_hold(const char *identifier); void root_mount_hold_token(const char *identifier, struct root_hold_token *h); void root_mount_rel(struct root_hold_token *h); int root_mounted(void); /* * Unit number allocation API. (kern/subr_unit.c) */ struct unrhdr; struct unrhdr *new_unrhdr(int low, int high, struct mtx *mutex); void init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex); void delete_unrhdr(struct unrhdr *uh); void clear_unrhdr(struct unrhdr *uh); void clean_unrhdr(struct unrhdr *uh); void clean_unrhdrl(struct unrhdr *uh); int alloc_unr(struct unrhdr *uh); int alloc_unr_specific(struct unrhdr *uh, u_int item); int alloc_unrl(struct unrhdr *uh); void free_unr(struct unrhdr *uh, u_int item); #ifndef __LP64__ #define UNR64_LOCKED #endif struct unrhdr64 { uint64_t counter; }; static __inline void new_unrhdr64(struct unrhdr64 *unr64, uint64_t low) { unr64->counter = low; } #ifdef UNR64_LOCKED uint64_t alloc_unr64(struct unrhdr64 *); #else static __inline uint64_t alloc_unr64(struct unrhdr64 *unr64) { return (atomic_fetchadd_64(&unr64->counter, 1)); } #endif void intr_prof_stack_use(struct thread *td, struct trapframe *frame); void counted_warning(unsigned *counter, const char *msg); /* * APIs to manage deprecation and obsolescence. */ void _gone_in(int major, const char *msg); void _gone_in_dev(device_t dev, int major, const char *msg); #ifdef NO_OBSOLETE_CODE #define __gone_ok(m, msg) \ _Static_assert(m < P_OSREL_MAJOR(__FreeBSD_version)), \ "Obsolete code: " msg); #else #define __gone_ok(m, msg) #endif #define gone_in(major, msg) __gone_ok(major, msg) _gone_in(major, msg) #define gone_in_dev(dev, major, msg) __gone_ok(major, msg) _gone_in_dev(dev, major, msg) #if defined(INVARIANTS) || defined(WITNESS) #define __diagused #else #define __diagused __unused #endif #endif /* _KERNEL */ __NULLABILITY_PRAGMA_POP #endif /* !_SYS_SYSTM_H_ */