diff --git a/sys/kern/subr_csan.c b/sys/kern/subr_csan.c index 06b0b6ebb020..56d2e59ff12c 100644 --- a/sys/kern/subr_csan.c +++ b/sys/kern/subr_csan.c @@ -1,918 +1,912 @@ /* $NetBSD: subr_csan.c,v 1.5 2019/11/15 08:11:37 maxv Exp $ */ /* * Copyright (c) 2019 The NetBSD Foundation, Inc. * All rights reserved. * Copyright (c) 2019 Andrew Turner * * This code is derived from software contributed to The NetBSD Foundation * by Maxime Villard. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #define SAN_RUNTIME #include "opt_ddb.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #ifdef KCSAN_PANIC #define REPORT panic #else #define REPORT printf #endif typedef struct { uintptr_t addr; uint32_t size; bool write:1; bool atomic:1; uintptr_t pc; } csan_cell_t; typedef struct { bool inited; uint32_t cnt; csan_cell_t cell; } csan_cpu_t; static csan_cpu_t kcsan_cpus[MAXCPU]; static bool kcsan_enabled __read_mostly; #define __RET_ADDR (uintptr_t)__builtin_return_address(0) #define KCSAN_NACCESSES 1024 #define KCSAN_DELAY 10 /* 10 microseconds */ /* -------------------------------------------------------------------------- */ /* The MD code. */ #include /* -------------------------------------------------------------------------- */ static void kcsan_enable(void *dummy __unused) { printf("Enabling KCSCAN, expect reduced performance.\n"); kcsan_enabled = true; } SYSINIT(kcsan_enable, SI_SUB_SMP, SI_ORDER_SECOND, kcsan_enable, NULL); void kcsan_cpu_init(u_int cpu) { kcsan_cpus[cpu].inited = true; } /* -------------------------------------------------------------------------- */ static inline void kcsan_report(csan_cell_t *new, u_int newcpu, csan_cell_t *old, u_int oldcpu) { const char *newsym, *oldsym; #ifdef DDB c_db_sym_t sym; db_expr_t offset; sym = db_search_symbol((vm_offset_t)new->pc, DB_STGY_PROC, &offset); db_symbol_values(sym, &newsym, NULL); sym = db_search_symbol((vm_offset_t)old->pc, DB_STGY_PROC, &offset); db_symbol_values(sym, &oldsym, NULL); #else newsym = ""; oldsym = ""; #endif REPORT("CSan: Racy Access " "[Cpu%u %s%s Addr=%p Size=%u PC=%p<%s>] " "[Cpu%u %s%s Addr=%p Size=%u PC=%p<%s>]\n", newcpu, (new->atomic ? "Atomic " : ""), (new->write ? "Write" : "Read"), (void *)new->addr, new->size, (void *)new->pc, newsym, oldcpu, (old->atomic ? "Atomic " : ""), (old->write ? 
"Write" : "Read"), (void *)old->addr, old->size, (void *)old->pc, oldsym); kcsan_md_unwind(); } static inline bool kcsan_access_is_atomic(csan_cell_t *new, csan_cell_t *old) { if (new->write && !new->atomic) return false; if (old->write && !old->atomic) return false; return true; } static inline void kcsan_access(uintptr_t addr, size_t size, bool write, bool atomic, uintptr_t pc) { csan_cell_t old, new; csan_cpu_t *cpu; uint64_t intr; size_t i; if (__predict_false(!kcsan_enabled)) return; if (__predict_false(kcsan_md_unsupported((vm_offset_t)addr))) return; if (KERNEL_PANICKED()) return; new.addr = addr; new.size = size; new.write = write; new.atomic = atomic; new.pc = pc; CPU_FOREACH(i) { __builtin_memcpy(&old, &kcsan_cpus[i].cell, sizeof(old)); if (old.addr + old.size <= new.addr) continue; if (new.addr + new.size <= old.addr) continue; if (__predict_true(!old.write && !new.write)) continue; if (__predict_true(kcsan_access_is_atomic(&new, &old))) continue; kcsan_report(&new, PCPU_GET(cpuid), &old, i); break; } if (__predict_false(!kcsan_md_is_avail())) return; kcsan_md_disable_intrs(&intr); cpu = &kcsan_cpus[PCPU_GET(cpuid)]; if (__predict_false(!cpu->inited)) goto out; cpu->cnt = (cpu->cnt + 1) % KCSAN_NACCESSES; if (__predict_true(cpu->cnt != 0)) goto out; __builtin_memcpy(&cpu->cell, &new, sizeof(new)); kcsan_md_delay(KCSAN_DELAY); __builtin_memset(&cpu->cell, 0, sizeof(new)); out: kcsan_md_enable_intrs(&intr); } #define CSAN_READ(size) \ void __tsan_read##size(uintptr_t); \ void __tsan_read##size(uintptr_t addr) \ { \ kcsan_access(addr, size, false, false, __RET_ADDR); \ } \ void __tsan_unaligned_read##size(uintptr_t); \ void __tsan_unaligned_read##size(uintptr_t addr) \ { \ kcsan_access(addr, size, false, false, __RET_ADDR); \ } CSAN_READ(1) CSAN_READ(2) CSAN_READ(4) CSAN_READ(8) CSAN_READ(16) #define CSAN_WRITE(size) \ void __tsan_write##size(uintptr_t); \ void __tsan_write##size(uintptr_t addr) \ { \ kcsan_access(addr, size, true, false, __RET_ADDR); \ } \ 
void __tsan_unaligned_write##size(uintptr_t); \ void __tsan_unaligned_write##size(uintptr_t addr) \ { \ kcsan_access(addr, size, true, false, __RET_ADDR); \ } CSAN_WRITE(1) CSAN_WRITE(2) CSAN_WRITE(4) CSAN_WRITE(8) CSAN_WRITE(16) void __tsan_read_range(uintptr_t, size_t); void __tsan_write_range(uintptr_t, size_t); void __tsan_read_range(uintptr_t addr, size_t size) { kcsan_access(addr, size, false, false, __RET_ADDR); } void __tsan_write_range(uintptr_t addr, size_t size) { kcsan_access(addr, size, true, false, __RET_ADDR); } void __tsan_init(void); void __tsan_func_entry(void *); void __tsan_func_exit(void); void __tsan_init(void) { } void __tsan_func_entry(void *call_pc) { } void __tsan_func_exit(void) { } /* -------------------------------------------------------------------------- */ void * kcsan_memcpy(void *dst, const void *src, size_t len) { kcsan_access((uintptr_t)src, len, false, false, __RET_ADDR); kcsan_access((uintptr_t)dst, len, true, false, __RET_ADDR); return __builtin_memcpy(dst, src, len); } int kcsan_memcmp(const void *b1, const void *b2, size_t len) { kcsan_access((uintptr_t)b1, len, false, false, __RET_ADDR); kcsan_access((uintptr_t)b2, len, false, false, __RET_ADDR); return __builtin_memcmp(b1, b2, len); } void * kcsan_memset(void *b, int c, size_t len) { kcsan_access((uintptr_t)b, len, true, false, __RET_ADDR); return __builtin_memset(b, c, len); } void * kcsan_memmove(void *dst, const void *src, size_t len) { kcsan_access((uintptr_t)src, len, false, false, __RET_ADDR); kcsan_access((uintptr_t)dst, len, true, false, __RET_ADDR); return __builtin_memmove(dst, src, len); } char * kcsan_strcpy(char *dst, const char *src) { char *save = dst; while (1) { kcsan_access((uintptr_t)src, 1, false, false, __RET_ADDR); kcsan_access((uintptr_t)dst, 1, true, false, __RET_ADDR); *dst = *src; if (*src == '\0') break; src++, dst++; } return save; } int kcsan_strcmp(const char *s1, const char *s2) { while (1) { kcsan_access((uintptr_t)s1, 1, false, false, 
__RET_ADDR); kcsan_access((uintptr_t)s2, 1, false, false, __RET_ADDR); if (*s1 != *s2) break; if (*s1 == '\0') return 0; s1++, s2++; } return (*(const unsigned char *)s1 - *(const unsigned char *)s2); } size_t kcsan_strlen(const char *str) { const char *s; s = str; while (1) { kcsan_access((uintptr_t)s, 1, false, false, __RET_ADDR); if (*s == '\0') break; s++; } return (s - str); } -#undef copyin -#undef copyin_nofault -#undef copyinstr -#undef copyout -#undef copyout_nofault - int kcsan_copyin(const void *uaddr, void *kaddr, size_t len) { kcsan_access((uintptr_t)kaddr, len, true, false, __RET_ADDR); return copyin(uaddr, kaddr, len); } int kcsan_copyinstr(const void *uaddr, void *kaddr, size_t len, size_t *done) { kcsan_access((uintptr_t)kaddr, len, true, false, __RET_ADDR); return copyinstr(uaddr, kaddr, len, done); } int kcsan_copyout(const void *kaddr, void *uaddr, size_t len) { kcsan_access((uintptr_t)kaddr, len, false, false, __RET_ADDR); return copyout(kaddr, uaddr, len); } /* -------------------------------------------------------------------------- */ #include #define ATOMIC_SAN_PREFIX kcsan #include #define _CSAN_ATOMIC_FUNC_ADD(name, type) \ void kcsan_atomic_add_##name(volatile type *ptr, type val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ atomic_add_##name(ptr, val); \ } #define CSAN_ATOMIC_FUNC_ADD(name, type) \ _CSAN_ATOMIC_FUNC_ADD(name, type) \ _CSAN_ATOMIC_FUNC_ADD(acq_##name, type) \ _CSAN_ATOMIC_FUNC_ADD(rel_##name, type) #define _CSAN_ATOMIC_FUNC_CLEAR(name, type) \ void kcsan_atomic_clear_##name(volatile type *ptr, type val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ atomic_clear_##name(ptr, val); \ } #define CSAN_ATOMIC_FUNC_CLEAR(name, type) \ _CSAN_ATOMIC_FUNC_CLEAR(name, type) \ _CSAN_ATOMIC_FUNC_CLEAR(acq_##name, type) \ _CSAN_ATOMIC_FUNC_CLEAR(rel_##name, type) #define _CSAN_ATOMIC_FUNC_CMPSET(name, type) \ int kcsan_atomic_cmpset_##name(volatile type *ptr, type val1, 
\ type val2) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ return (atomic_cmpset_##name(ptr, val1, val2)); \ } #define CSAN_ATOMIC_FUNC_CMPSET(name, type) \ _CSAN_ATOMIC_FUNC_CMPSET(name, type) \ _CSAN_ATOMIC_FUNC_CMPSET(acq_##name, type) \ _CSAN_ATOMIC_FUNC_CMPSET(rel_##name, type) #define _CSAN_ATOMIC_FUNC_FCMPSET(name, type) \ int kcsan_atomic_fcmpset_##name(volatile type *ptr, type *val1, \ type val2) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ return (atomic_fcmpset_##name(ptr, val1, val2)); \ } #define CSAN_ATOMIC_FUNC_FCMPSET(name, type) \ _CSAN_ATOMIC_FUNC_FCMPSET(name, type) \ _CSAN_ATOMIC_FUNC_FCMPSET(acq_##name, type) \ _CSAN_ATOMIC_FUNC_FCMPSET(rel_##name, type) #define CSAN_ATOMIC_FUNC_FETCHADD(name, type) \ type kcsan_atomic_fetchadd_##name(volatile type *ptr, type val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ return (atomic_fetchadd_##name(ptr, val)); \ } #define _CSAN_ATOMIC_FUNC_LOAD(name, type) \ type kcsan_atomic_load_##name(volatile type *ptr) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), false, true, \ __RET_ADDR); \ return (atomic_load_##name(ptr)); \ } #define CSAN_ATOMIC_FUNC_LOAD(name, type) \ _CSAN_ATOMIC_FUNC_LOAD(name, type) \ _CSAN_ATOMIC_FUNC_LOAD(acq_##name, type) \ #define CSAN_ATOMIC_FUNC_READANDCLEAR(name, type) \ type kcsan_atomic_readandclear_##name(volatile type *ptr) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ return (atomic_readandclear_##name(ptr)); \ } #define _CSAN_ATOMIC_FUNC_SET(name, type) \ void kcsan_atomic_set_##name(volatile type *ptr, type val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ atomic_set_##name(ptr, val); \ } #define CSAN_ATOMIC_FUNC_SET(name, type) \ _CSAN_ATOMIC_FUNC_SET(name, type) \ _CSAN_ATOMIC_FUNC_SET(acq_##name, type) \ _CSAN_ATOMIC_FUNC_SET(rel_##name, type) #define _CSAN_ATOMIC_FUNC_SUBTRACT(name, type) \ void 
kcsan_atomic_subtract_##name(volatile type *ptr, type val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ atomic_subtract_##name(ptr, val); \ } #define CSAN_ATOMIC_FUNC_SUBTRACT(name, type) \ _CSAN_ATOMIC_FUNC_SUBTRACT(name, type) \ _CSAN_ATOMIC_FUNC_SUBTRACT(acq_##name, type) \ _CSAN_ATOMIC_FUNC_SUBTRACT(rel_##name, type) #define _CSAN_ATOMIC_FUNC_STORE(name, type) \ void kcsan_atomic_store_##name(volatile type *ptr, type val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ atomic_store_##name(ptr, val); \ } #define CSAN_ATOMIC_FUNC_STORE(name, type) \ _CSAN_ATOMIC_FUNC_STORE(name, type) \ _CSAN_ATOMIC_FUNC_STORE(rel_##name, type) #define CSAN_ATOMIC_FUNC_SWAP(name, type) \ type kcsan_atomic_swap_##name(volatile type *ptr, type val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ return(atomic_swap_##name(ptr, val)); \ } #define CSAN_ATOMIC_FUNC_TESTANDCLEAR(name, type) \ int kcsan_atomic_testandclear_##name(volatile type *ptr, u_int val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ return(atomic_testandclear_##name(ptr, val)); \ } #define CSAN_ATOMIC_FUNC_TESTANDSET(name, type) \ int kcsan_atomic_testandset_##name(volatile type *ptr, u_int val) \ { \ kcsan_access((uintptr_t)ptr, sizeof(type), true, true, \ __RET_ADDR); \ return (atomic_testandset_##name(ptr, val)); \ } CSAN_ATOMIC_FUNC_ADD(8, uint8_t) CSAN_ATOMIC_FUNC_CLEAR(8, uint8_t) CSAN_ATOMIC_FUNC_CMPSET(8, uint8_t) CSAN_ATOMIC_FUNC_FCMPSET(8, uint8_t) CSAN_ATOMIC_FUNC_LOAD(8, uint8_t) CSAN_ATOMIC_FUNC_SET(8, uint8_t) CSAN_ATOMIC_FUNC_SUBTRACT(8, uint8_t) _CSAN_ATOMIC_FUNC_STORE(8, uint8_t) #if 0 CSAN_ATOMIC_FUNC_FETCHADD(8, uint8_t) CSAN_ATOMIC_FUNC_READANDCLEAR(8, uint8_t) CSAN_ATOMIC_FUNC_SWAP(8, uint8_t) CSAN_ATOMIC_FUNC_TESTANDCLEAR(8, uint8_t) CSAN_ATOMIC_FUNC_TESTANDSET(8, uint8_t) #endif CSAN_ATOMIC_FUNC_ADD(16, uint16_t) CSAN_ATOMIC_FUNC_CLEAR(16, uint16_t) 
CSAN_ATOMIC_FUNC_CMPSET(16, uint16_t) CSAN_ATOMIC_FUNC_FCMPSET(16, uint16_t) CSAN_ATOMIC_FUNC_LOAD(16, uint16_t) CSAN_ATOMIC_FUNC_SET(16, uint16_t) CSAN_ATOMIC_FUNC_SUBTRACT(16, uint16_t) _CSAN_ATOMIC_FUNC_STORE(16, uint16_t) #if 0 CSAN_ATOMIC_FUNC_FETCHADD(16, uint16_t) CSAN_ATOMIC_FUNC_READANDCLEAR(16, uint16_t) CSAN_ATOMIC_FUNC_SWAP(16, uint16_t) CSAN_ATOMIC_FUNC_TESTANDCLEAR(16, uint16_t) CSAN_ATOMIC_FUNC_TESTANDSET(16, uint16_t) #endif CSAN_ATOMIC_FUNC_ADD(32, uint32_t) CSAN_ATOMIC_FUNC_CLEAR(32, uint32_t) CSAN_ATOMIC_FUNC_CMPSET(32, uint32_t) CSAN_ATOMIC_FUNC_FCMPSET(32, uint32_t) CSAN_ATOMIC_FUNC_FETCHADD(32, uint32_t) CSAN_ATOMIC_FUNC_LOAD(32, uint32_t) CSAN_ATOMIC_FUNC_READANDCLEAR(32, uint32_t) CSAN_ATOMIC_FUNC_SET(32, uint32_t) CSAN_ATOMIC_FUNC_SUBTRACT(32, uint32_t) CSAN_ATOMIC_FUNC_STORE(32, uint32_t) CSAN_ATOMIC_FUNC_SWAP(32, uint32_t) #if !defined(__aarch64__) CSAN_ATOMIC_FUNC_TESTANDCLEAR(32, uint32_t) CSAN_ATOMIC_FUNC_TESTANDSET(32, uint32_t) #endif CSAN_ATOMIC_FUNC_ADD(64, uint64_t) CSAN_ATOMIC_FUNC_CLEAR(64, uint64_t) CSAN_ATOMIC_FUNC_CMPSET(64, uint64_t) CSAN_ATOMIC_FUNC_FCMPSET(64, uint64_t) CSAN_ATOMIC_FUNC_FETCHADD(64, uint64_t) CSAN_ATOMIC_FUNC_LOAD(64, uint64_t) CSAN_ATOMIC_FUNC_READANDCLEAR(64, uint64_t) CSAN_ATOMIC_FUNC_SET(64, uint64_t) CSAN_ATOMIC_FUNC_SUBTRACT(64, uint64_t) CSAN_ATOMIC_FUNC_STORE(64, uint64_t) CSAN_ATOMIC_FUNC_SWAP(64, uint64_t) #if !defined(__aarch64__) CSAN_ATOMIC_FUNC_TESTANDCLEAR(64, uint64_t) CSAN_ATOMIC_FUNC_TESTANDSET(64, uint64_t) #endif CSAN_ATOMIC_FUNC_ADD(char, uint8_t) CSAN_ATOMIC_FUNC_CLEAR(char, uint8_t) CSAN_ATOMIC_FUNC_CMPSET(char, uint8_t) CSAN_ATOMIC_FUNC_FCMPSET(char, uint8_t) CSAN_ATOMIC_FUNC_LOAD(char, uint8_t) CSAN_ATOMIC_FUNC_SET(char, uint8_t) CSAN_ATOMIC_FUNC_SUBTRACT(char, uint8_t) _CSAN_ATOMIC_FUNC_STORE(char, uint8_t) #if 0 CSAN_ATOMIC_FUNC_FETCHADD(char, uint8_t) CSAN_ATOMIC_FUNC_READANDCLEAR(char, uint8_t) CSAN_ATOMIC_FUNC_SWAP(char, uint8_t) CSAN_ATOMIC_FUNC_TESTANDCLEAR(char, uint8_t) 
CSAN_ATOMIC_FUNC_TESTANDSET(char, uint8_t) #endif CSAN_ATOMIC_FUNC_ADD(short, uint16_t) CSAN_ATOMIC_FUNC_CLEAR(short, uint16_t) CSAN_ATOMIC_FUNC_CMPSET(short, uint16_t) CSAN_ATOMIC_FUNC_FCMPSET(short, uint16_t) CSAN_ATOMIC_FUNC_LOAD(short, uint16_t) CSAN_ATOMIC_FUNC_SET(short, uint16_t) CSAN_ATOMIC_FUNC_SUBTRACT(short, uint16_t) _CSAN_ATOMIC_FUNC_STORE(short, uint16_t) #if 0 CSAN_ATOMIC_FUNC_FETCHADD(short, uint16_t) CSAN_ATOMIC_FUNC_READANDCLEAR(short, uint16_t) CSAN_ATOMIC_FUNC_SWAP(short, uint16_t) CSAN_ATOMIC_FUNC_TESTANDCLEAR(short, uint16_t) CSAN_ATOMIC_FUNC_TESTANDSET(short, uint16_t) #endif CSAN_ATOMIC_FUNC_ADD(int, u_int) CSAN_ATOMIC_FUNC_CLEAR(int, u_int) CSAN_ATOMIC_FUNC_CMPSET(int, u_int) CSAN_ATOMIC_FUNC_FCMPSET(int, u_int) CSAN_ATOMIC_FUNC_FETCHADD(int, u_int) CSAN_ATOMIC_FUNC_LOAD(int, u_int) CSAN_ATOMIC_FUNC_READANDCLEAR(int, u_int) CSAN_ATOMIC_FUNC_SET(int, u_int) CSAN_ATOMIC_FUNC_SUBTRACT(int, u_int) CSAN_ATOMIC_FUNC_STORE(int, u_int) CSAN_ATOMIC_FUNC_SWAP(int, u_int) #if !defined(__aarch64__) CSAN_ATOMIC_FUNC_TESTANDCLEAR(int, u_int) CSAN_ATOMIC_FUNC_TESTANDSET(int, u_int) #endif CSAN_ATOMIC_FUNC_ADD(long, u_long) CSAN_ATOMIC_FUNC_CLEAR(long, u_long) CSAN_ATOMIC_FUNC_CMPSET(long, u_long) CSAN_ATOMIC_FUNC_FCMPSET(long, u_long) CSAN_ATOMIC_FUNC_FETCHADD(long, u_long) CSAN_ATOMIC_FUNC_LOAD(long, u_long) CSAN_ATOMIC_FUNC_READANDCLEAR(long, u_long) CSAN_ATOMIC_FUNC_SET(long, u_long) CSAN_ATOMIC_FUNC_SUBTRACT(long, u_long) CSAN_ATOMIC_FUNC_STORE(long, u_long) CSAN_ATOMIC_FUNC_SWAP(long, u_long) #if !defined(__aarch64__) CSAN_ATOMIC_FUNC_TESTANDCLEAR(long, u_long) CSAN_ATOMIC_FUNC_TESTANDSET(long, u_long) CSAN_ATOMIC_FUNC_TESTANDSET(acq_long, u_long) #endif CSAN_ATOMIC_FUNC_ADD(ptr, uintptr_t) CSAN_ATOMIC_FUNC_CLEAR(ptr, uintptr_t) CSAN_ATOMIC_FUNC_CMPSET(ptr, uintptr_t) CSAN_ATOMIC_FUNC_FCMPSET(ptr, uintptr_t) #if !defined(__amd64__) CSAN_ATOMIC_FUNC_FETCHADD(ptr, uintptr_t) #endif CSAN_ATOMIC_FUNC_LOAD(ptr, uintptr_t) 
CSAN_ATOMIC_FUNC_READANDCLEAR(ptr, uintptr_t) CSAN_ATOMIC_FUNC_SET(ptr, uintptr_t) CSAN_ATOMIC_FUNC_SUBTRACT(ptr, uintptr_t) CSAN_ATOMIC_FUNC_STORE(ptr, uintptr_t) CSAN_ATOMIC_FUNC_SWAP(ptr, uintptr_t) #if 0 CSAN_ATOMIC_FUNC_TESTANDCLEAR(ptr, uintptr_t) CSAN_ATOMIC_FUNC_TESTANDSET(ptr, uintptr_t) #endif #define CSAN_ATOMIC_FUNC_THREAD_FENCE(name) \ void kcsan_atomic_thread_fence_##name(void) \ { \ atomic_thread_fence_##name(); \ } CSAN_ATOMIC_FUNC_THREAD_FENCE(acq) CSAN_ATOMIC_FUNC_THREAD_FENCE(acq_rel) CSAN_ATOMIC_FUNC_THREAD_FENCE(rel) CSAN_ATOMIC_FUNC_THREAD_FENCE(seq_cst) void kcsan_atomic_interrupt_fence(void) { atomic_interrupt_fence(); } /* -------------------------------------------------------------------------- */ #include #include #define BUS_SAN_PREFIX kcsan #include int kcsan_bus_space_map(bus_space_tag_t tag, bus_addr_t hnd, bus_size_t size, int flags, bus_space_handle_t *handlep) { return (bus_space_map(tag, hnd, size, flags, handlep)); } void kcsan_bus_space_unmap(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t size) { bus_space_unmap(tag, hnd, size); } int kcsan_bus_space_subregion(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t offset, bus_size_t size, bus_space_handle_t *handlep) { return (bus_space_subregion(tag, hnd, offset, size, handlep)); } #if !defined(__amd64__) int kcsan_bus_space_alloc(bus_space_tag_t tag, bus_addr_t reg_start, bus_addr_t reg_end, bus_size_t size, bus_size_t alignment, bus_size_t boundary, int flags, bus_addr_t *addrp, bus_space_handle_t *handlep) { return (bus_space_alloc(tag, reg_start, reg_end, size, alignment, boundary, flags, addrp, handlep)); } #endif void kcsan_bus_space_free(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t size) { bus_space_free(tag, hnd, size); } void kcsan_bus_space_barrier(bus_space_tag_t tag, bus_space_handle_t hnd, bus_size_t offset, bus_size_t size, int flags) { bus_space_barrier(tag, hnd, offset, size, flags); } #define CSAN_BUS_READ_FUNC(func, width, type) \ type 
kcsan_bus_space_read##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset) \ { \ return (bus_space_read##func##_##width(tag, hnd, \ offset)); \ } \ #define CSAN_BUS_READ_PTR_FUNC(func, width, type) \ void kcsan_bus_space_read_##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t size, type *buf, \ bus_size_t count) \ { \ kcsan_access((uintptr_t)buf, sizeof(type) * count, \ false, false, __RET_ADDR); \ bus_space_read_##func##_##width(tag, hnd, size, buf, \ count); \ } CSAN_BUS_READ_FUNC(, 1, uint8_t) CSAN_BUS_READ_FUNC(_stream, 1, uint8_t) CSAN_BUS_READ_PTR_FUNC(multi, 1, uint8_t) CSAN_BUS_READ_PTR_FUNC(multi_stream, 1, uint8_t) CSAN_BUS_READ_PTR_FUNC(region, 1, uint8_t) CSAN_BUS_READ_PTR_FUNC(region_stream, 1, uint8_t) CSAN_BUS_READ_FUNC(, 2, uint16_t) CSAN_BUS_READ_FUNC(_stream, 2, uint16_t) CSAN_BUS_READ_PTR_FUNC(multi, 2, uint16_t) CSAN_BUS_READ_PTR_FUNC(multi_stream, 2, uint16_t) CSAN_BUS_READ_PTR_FUNC(region, 2, uint16_t) CSAN_BUS_READ_PTR_FUNC(region_stream, 2, uint16_t) CSAN_BUS_READ_FUNC(, 4, uint32_t) CSAN_BUS_READ_FUNC(_stream, 4, uint32_t) CSAN_BUS_READ_PTR_FUNC(multi, 4, uint32_t) CSAN_BUS_READ_PTR_FUNC(multi_stream, 4, uint32_t) CSAN_BUS_READ_PTR_FUNC(region, 4, uint32_t) CSAN_BUS_READ_PTR_FUNC(region_stream, 4, uint32_t) CSAN_BUS_READ_FUNC(, 8, uint64_t) #if defined(__aarch64__) CSAN_BUS_READ_FUNC(_stream, 8, uint64_t) CSAN_BUS_READ_PTR_FUNC(multi, 8, uint64_t) CSAN_BUS_READ_PTR_FUNC(multi_stream, 8, uint64_t) CSAN_BUS_READ_PTR_FUNC(region, 8, uint64_t) CSAN_BUS_READ_PTR_FUNC(region_stream, 8, uint64_t) #endif #define CSAN_BUS_WRITE_FUNC(func, width, type) \ void kcsan_bus_space_write##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset, type value) \ { \ bus_space_write##func##_##width(tag, hnd, offset, value); \ } \ #define CSAN_BUS_WRITE_PTR_FUNC(func, width, type) \ void kcsan_bus_space_write_##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t size, 
const type *buf, \ bus_size_t count) \ { \ kcsan_access((uintptr_t)buf, sizeof(type) * count, \ true, false, __RET_ADDR); \ bus_space_write_##func##_##width(tag, hnd, size, buf, \ count); \ } CSAN_BUS_WRITE_FUNC(, 1, uint8_t) CSAN_BUS_WRITE_FUNC(_stream, 1, uint8_t) CSAN_BUS_WRITE_PTR_FUNC(multi, 1, uint8_t) CSAN_BUS_WRITE_PTR_FUNC(multi_stream, 1, uint8_t) CSAN_BUS_WRITE_PTR_FUNC(region, 1, uint8_t) CSAN_BUS_WRITE_PTR_FUNC(region_stream, 1, uint8_t) CSAN_BUS_WRITE_FUNC(, 2, uint16_t) CSAN_BUS_WRITE_FUNC(_stream, 2, uint16_t) CSAN_BUS_WRITE_PTR_FUNC(multi, 2, uint16_t) CSAN_BUS_WRITE_PTR_FUNC(multi_stream, 2, uint16_t) CSAN_BUS_WRITE_PTR_FUNC(region, 2, uint16_t) CSAN_BUS_WRITE_PTR_FUNC(region_stream, 2, uint16_t) CSAN_BUS_WRITE_FUNC(, 4, uint32_t) CSAN_BUS_WRITE_FUNC(_stream, 4, uint32_t) CSAN_BUS_WRITE_PTR_FUNC(multi, 4, uint32_t) CSAN_BUS_WRITE_PTR_FUNC(multi_stream, 4, uint32_t) CSAN_BUS_WRITE_PTR_FUNC(region, 4, uint32_t) CSAN_BUS_WRITE_PTR_FUNC(region_stream, 4, uint32_t) CSAN_BUS_WRITE_FUNC(, 8, uint64_t) #if defined(__aarch64__) CSAN_BUS_WRITE_FUNC(_stream, 8, uint64_t) CSAN_BUS_WRITE_PTR_FUNC(multi, 8, uint64_t) CSAN_BUS_WRITE_PTR_FUNC(multi_stream, 8, uint64_t) CSAN_BUS_WRITE_PTR_FUNC(region, 8, uint64_t) CSAN_BUS_WRITE_PTR_FUNC(region_stream, 8, uint64_t) #endif #define CSAN_BUS_SET_FUNC(func, width, type) \ void kcsan_bus_space_set_##func##_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset, type value, \ bus_size_t count) \ { \ bus_space_set_##func##_##width(tag, hnd, offset, value, \ count); \ } CSAN_BUS_SET_FUNC(multi, 1, uint8_t) CSAN_BUS_SET_FUNC(region, 1, uint8_t) #if !defined(__aarch64__) CSAN_BUS_SET_FUNC(multi_stream, 1, uint8_t) CSAN_BUS_SET_FUNC(region_stream, 1, uint8_t) #endif CSAN_BUS_SET_FUNC(multi, 2, uint16_t) CSAN_BUS_SET_FUNC(region, 2, uint16_t) #if !defined(__aarch64__) CSAN_BUS_SET_FUNC(multi_stream, 2, uint16_t) CSAN_BUS_SET_FUNC(region_stream, 2, uint16_t) #endif CSAN_BUS_SET_FUNC(multi, 4, uint32_t) 
CSAN_BUS_SET_FUNC(region, 4, uint32_t) #if !defined(__aarch64__) CSAN_BUS_SET_FUNC(multi_stream, 4, uint32_t) CSAN_BUS_SET_FUNC(region_stream, 4, uint32_t) #endif #if !defined(__amd64__) CSAN_BUS_SET_FUNC(multi, 8, uint64_t) CSAN_BUS_SET_FUNC(region, 8, uint64_t) #if !defined(__aarch64__) CSAN_BUS_SET_FUNC(multi_stream, 8, uint64_t) CSAN_BUS_SET_FUNC(region_stream, 8, uint64_t) #endif #endif #define CSAN_BUS_PEEK_FUNC(width, type) \ int kcsan_bus_space_peek_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset, type *value) \ { \ kcsan_access((uintptr_t)value, sizeof(type), true, false, \ __RET_ADDR); \ return (bus_space_peek_##width(tag, hnd, offset, value)); \ } CSAN_BUS_PEEK_FUNC(1, uint8_t) CSAN_BUS_PEEK_FUNC(2, uint16_t) CSAN_BUS_PEEK_FUNC(4, uint32_t) #if !defined(__i386__) CSAN_BUS_PEEK_FUNC(8, uint64_t) #endif #define CSAN_BUS_POKE_FUNC(width, type) \ int kcsan_bus_space_poke_##width(bus_space_tag_t tag, \ bus_space_handle_t hnd, bus_size_t offset, type value) \ { \ return (bus_space_poke_##width(tag, hnd, offset, value)); \ } CSAN_BUS_POKE_FUNC(1, uint8_t) CSAN_BUS_POKE_FUNC(2, uint16_t) CSAN_BUS_POKE_FUNC(4, uint32_t) #if !defined(__i386__) CSAN_BUS_POKE_FUNC(8, uint64_t) #endif diff --git a/sys/sys/libkern.h b/sys/sys/libkern.h index 8517c0dab1f6..d8d3dce1b705 100644 --- a/sys/sys/libkern.h +++ b/sys/sys/libkern.h @@ -1,235 +1,237 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)libkern.h 8.1 (Berkeley) 6/10/93 * $FreeBSD$ */ #ifndef _SYS_LIBKERN_H_ #define _SYS_LIBKERN_H_ #include #include #ifdef _KERNEL #include #endif #ifndef LIBKERN_INLINE #define LIBKERN_INLINE static __inline #define LIBKERN_BODY #endif /* BCD conversions. 
*/ extern u_char const bcd2bin_data[]; extern u_char const bin2bcd_data[]; extern char const hex2ascii_data[]; #define LIBKERN_LEN_BCD2BIN 154 #define LIBKERN_LEN_BIN2BCD 100 #define LIBKERN_LEN_HEX2ASCII 36 static inline u_char bcd2bin(int bcd) { KASSERT(bcd >= 0 && bcd < LIBKERN_LEN_BCD2BIN, ("invalid bcd %d", bcd)); return (bcd2bin_data[bcd]); } static inline u_char bin2bcd(int bin) { KASSERT(bin >= 0 && bin < LIBKERN_LEN_BIN2BCD, ("invalid bin %d", bin)); return (bin2bcd_data[bin]); } static inline char hex2ascii(int hex) { KASSERT(hex >= 0 && hex < LIBKERN_LEN_HEX2ASCII, ("invalid hex %d", hex)); return (hex2ascii_data[hex]); } static inline bool validbcd(int bcd) { return (bcd == 0 || (bcd > 0 && bcd <= 0x99 && bcd2bin_data[bcd] != 0)); } static __inline int imax(int a, int b) { return (a > b ? a : b); } static __inline int imin(int a, int b) { return (a < b ? a : b); } static __inline long lmax(long a, long b) { return (a > b ? a : b); } static __inline long lmin(long a, long b) { return (a < b ? a : b); } static __inline u_int max(u_int a, u_int b) { return (a > b ? a : b); } static __inline u_int min(u_int a, u_int b) { return (a < b ? a : b); } static __inline quad_t qmax(quad_t a, quad_t b) { return (a > b ? a : b); } static __inline quad_t qmin(quad_t a, quad_t b) { return (a < b ? a : b); } static __inline u_quad_t uqmax(u_quad_t a, u_quad_t b) { return (a > b ? a : b); } static __inline u_quad_t uqmin(u_quad_t a, u_quad_t b) { return (a < b ? a : b); } static __inline u_long ulmax(u_long a, u_long b) { return (a > b ? a : b); } static __inline u_long ulmin(u_long a, u_long b) { return (a < b ? a : b); } static __inline __uintmax_t ummax(__uintmax_t a, __uintmax_t b) { return (a > b ? a : b); } static __inline __uintmax_t ummin(__uintmax_t a, __uintmax_t b) { return (a < b ? a : b); } static __inline off_t omax(off_t a, off_t b) { return (a > b ? a : b); } static __inline off_t omin(off_t a, off_t b) { return (a < b ? 
a : b); } static __inline int abs(int a) { return (a < 0 ? -a : a); } static __inline long labs(long a) { return (a < 0 ? -a : a); } static __inline quad_t qabs(quad_t a) { return (a < 0 ? -a : a); } #ifndef RANDOM_FENESTRASX #define ARC4_ENTR_NONE 0 /* Don't have entropy yet. */ #define ARC4_ENTR_HAVE 1 /* Have entropy. */ #define ARC4_ENTR_SEED 2 /* Reseeding. */ extern int arc4rand_iniseed_state; #endif /* Prototypes for non-quad routines. */ struct malloc_type; uint32_t arc4random(void); void arc4random_buf(void *, size_t); uint32_t arc4random_uniform(uint32_t); void arc4rand(void *, u_int, int); int timingsafe_bcmp(const void *, const void *, size_t); void *bsearch(const void *, const void *, size_t, size_t, int (*)(const void *, const void *)); #ifndef HAVE_INLINE_FFS int ffs(int); #endif #ifndef HAVE_INLINE_FFSL int ffsl(long); #endif #ifndef HAVE_INLINE_FFSLL int ffsll(long long); #endif #ifndef HAVE_INLINE_FLS int fls(int); #endif #ifndef HAVE_INLINE_FLSL int flsl(long); #endif #ifndef HAVE_INLINE_FLSLL int flsll(long long); #endif #define bitcount64(x) __bitcount64((uint64_t)(x)) #define bitcount32(x) __bitcount32((uint32_t)(x)) #define bitcount16(x) __bitcount16((uint16_t)(x)) #define bitcountl(x) __bitcountl((u_long)(x)) #define bitcount(x) __bitcount((u_int)(x)) int fnmatch(const char *, const char *, int); int locc(int, char *, u_int); void *memchr(const void *s, int c, size_t n); void *memcchr(const void *s, int c, size_t n); void *memmem(const void *l, size_t l_len, const void *s, size_t s_len); void qsort(void *base, size_t nmemb, size_t size, int (*compar)(const void *, const void *)); void qsort_r(void *base, size_t nmemb, size_t size, void *thunk, int (*compar)(void *, const void *, const void *)); u_long random(void); int scanc(u_int, const u_char *, const u_char *, int); int strcasecmp(const char *, const char *); char *strcasestr(const char *, const char *); char *strcat(char * __restrict, const char * __restrict); char *strchr(const char *, 
int); char *strchrnul(const char *, int); int strcmp(const char *, const char *); char *strcpy(char * __restrict, const char * __restrict); char *strdup_flags(const char *__restrict, struct malloc_type *, int); size_t strcspn(const char *, const char *) __pure; char *strdup(const char *__restrict, struct malloc_type *); char *strncat(char *, const char *, size_t); char *strndup(const char *__restrict, size_t, struct malloc_type *); size_t strlcat(char *, const char *, size_t); size_t strlcpy(char *, const char *, size_t); size_t strlen(const char *); int strncasecmp(const char *, const char *, size_t); int strncmp(const char *, const char *, size_t); char *strncpy(char * __restrict, const char * __restrict, size_t); size_t strnlen(const char *, size_t); char *strnstr(const char *, const char *, size_t); char *strrchr(const char *, int); char *strsep(char **, const char *delim); size_t strspn(const char *, const char *); char *strstr(const char *, const char *); int strvalid(const char *, size_t); -#ifdef KCSAN -char *kcsan_strcpy(char *, const char *); -int kcsan_strcmp(const char *, const char *); -size_t kcsan_strlen(const char *); -#define strcpy(d, s) kcsan_strcpy((d), (s)) -#define strcmp(s1, s2) kcsan_strcmp((s1), (s2)) -#define strlen(s) kcsan_strlen((s)) +#ifdef SAN_PREFIX +char *SAN_INTERCEPTOR(strcpy)(char *, const char *); +int SAN_INTERCEPTOR(strcmp)(const char *, const char *); +size_t SAN_INTERCEPTOR(strlen)(const char *); +#ifndef SAN_RUNTIME +#define strcpy(d, s) SAN_INTERCEPTOR(strcpy)((d), (s)) +#define strcmp(s1, s2) SAN_INTERCEPTOR(strcmp)((s1), (s2)) +#define strlen(s) SAN_INTERCEPTOR(strlen)(s) +#endif /* !SAN_RUNTIME */ #else #define strcpy(d, s) __builtin_strcpy((d), (s)) #define strcmp(s1, s2) __builtin_strcmp((s1), (s2)) #define strlen(s) __builtin_strlen((s)) -#endif +#endif /* SAN_PREFIX */ static __inline char * index(const char *p, int ch) { return (strchr(p, ch)); } static __inline char * rindex(const char *p, int ch) { return 
(strrchr(p, ch)); } /* fnmatch() return values. */ #define FNM_NOMATCH 1 /* Match failed. */ /* fnmatch() flags. */ #define FNM_NOESCAPE 0x01 /* Disable backslash escaping. */ #define FNM_PATHNAME 0x02 /* Slash must be matched by slash. */ #define FNM_PERIOD 0x04 /* Period must be matched by period. */ #define FNM_LEADING_DIR 0x08 /* Ignore / after Imatch. */ #define FNM_CASEFOLD 0x10 /* Case insensitive search. */ #define FNM_IGNORECASE FNM_CASEFOLD #define FNM_FILE_NAME FNM_PATHNAME #endif /* !_SYS_LIBKERN_H_ */ diff --git a/sys/sys/systm.h b/sys/sys/systm.h index 36d48fbf080d..8080f22266e2 100644 --- a/sys/sys/systm.h +++ b/sys/sys/systm.h @@ -1,638 +1,650 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1988, 1991, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)systm.h 8.7 (Berkeley) 3/29/95 * $FreeBSD$ */ #ifndef _SYS_SYSTM_H_ #define _SYS_SYSTM_H_ #include #include #include #include #include #include /* for people using printf mainly */ __NULLABILITY_PRAGMA_PUSH #ifdef _KERNEL extern int cold; /* nonzero if we are doing a cold boot */ extern int suspend_blocked; /* block suspend due to pending shutdown */ extern int rebooting; /* kern_reboot() has been called. 
*/ extern const char *panicstr; /* panic message */ extern bool panicked; #define KERNEL_PANICKED() __predict_false(panicked) extern char version[]; /* system version */ extern char compiler_version[]; /* compiler version */ extern char copyright[]; /* system copyright */ extern int kstack_pages; /* number of kernel stack pages */ extern u_long pagesizes[]; /* supported page sizes */ extern long physmem; /* physical memory */ extern long realmem; /* 'real' memory */ extern char *rootdevnames[2]; /* names of possible root devices */ extern int boothowto; /* reboot flags, from console subsystem */ extern int bootverbose; /* nonzero to print verbose messages */ extern int maxusers; /* system tune hint */ extern int ngroups_max; /* max # of supplemental groups */ extern int vm_guest; /* Running as virtual machine guest? */ extern u_long maxphys; /* max raw I/O transfer size */ /* * Detected virtual machine guest types. The intention is to expand * and/or add to the VM_GUEST_VM type if specific VM functionality is * ever implemented (e.g. vendor-specific paravirtualization features). * Keep in sync with vm_guest_sysctl_names[]. 
*/ enum VM_GUEST { VM_GUEST_NO = 0, VM_GUEST_VM, VM_GUEST_XEN, VM_GUEST_HV, VM_GUEST_VMWARE, VM_GUEST_KVM, VM_GUEST_BHYVE, VM_GUEST_VBOX, VM_GUEST_PARALLELS, VM_LAST }; #ifdef INVARIANTS /* The option is always available */ #define VNASSERT(exp, vp, msg) do { \ if (__predict_false(!(exp))) { \ vn_printf(vp, "VNASSERT failed: %s not true at %s:%d (%s)\n",\ #exp, __FILE__, __LINE__, __func__); \ kassert_panic msg; \ } \ } while (0) #define VNPASS(exp, vp) do { \ const char *_exp = #exp; \ VNASSERT(exp, vp, ("condition %s not met at %s:%d (%s)", \ _exp, __FILE__, __LINE__, __func__)); \ } while (0) #define __assert_unreachable() \ panic("executing segment marked as unreachable at %s:%d (%s)\n", \ __FILE__, __LINE__, __func__) #else #define VNASSERT(exp, vp, msg) do { \ } while (0) #define VNPASS(exp, vp) do { \ } while (0) #define __assert_unreachable() __unreachable() #endif #ifndef CTASSERT /* Allow lint to override */ #define CTASSERT(x) _Static_assert(x, "compile-time assertion failed") #endif #endif /* KERNEL */ /* * These functions need to be declared before the KASSERT macro is invoked in * !KASSERT_PANIC_OPTIONAL builds, so their declarations are sort of out of * place compared to other function definitions in this header. On the other * hand, this header is a bit disorganized anyway. */ void panic(const char *, ...) __dead2 __printflike(1, 2); void vpanic(const char *, __va_list) __dead2 __printflike(1, 0); #if defined(_STANDALONE) struct ucred; /* * Until we have more experience with KASSERTS that are called * from the boot loader, they are off. The bootloader does this * a little differently than the kernel (we just call printf atm). * we avoid most of the common functions in the boot loader, so * declare printf() here too. */ int printf(const char *, ...) __printflike(1, 2); # define kassert_panic printf #else /* !_STANDALONE */ # if defined(WITNESS) || defined(INVARIANT_SUPPORT) # ifdef KASSERT_PANIC_OPTIONAL void kassert_panic(const char *fmt, ...) 
__printflike(1, 2); # else # define kassert_panic panic # endif /* KASSERT_PANIC_OPTIONAL */ # endif /* defined(WITNESS) || defined(INVARIANT_SUPPORT) */ #endif /* _STANDALONE */ #if defined(INVARIANTS) || defined(_STANDALONE) #define KASSERT(exp,msg) do { \ if (__predict_false(!(exp))) \ kassert_panic msg; \ } while (0) #else /* !INVARIANTS && !_STANDALONE */ #define KASSERT(exp,msg) do { \ } while (0) #endif /* INVARIANTS || _STANDALONE */ /* * Helpful macros for quickly coming up with assertions with informative * panic messages. */ #define MPASS(ex) MPASS4(ex, #ex, __FILE__, __LINE__) #define MPASS2(ex, what) MPASS4(ex, what, __FILE__, __LINE__) #define MPASS3(ex, file, line) MPASS4(ex, #ex, file, line) #define MPASS4(ex, what, file, line) \ KASSERT((ex), ("Assertion %s failed at %s:%d", what, file, line)) /* * Align variables. */ #define __read_mostly __section(".data.read_mostly") #define __read_frequently __section(".data.read_frequently") #define __exclusive_cache_line __aligned(CACHE_LINE_SIZE) \ __section(".data.exclusive_cache_line") #ifdef _KERNEL #include /* MAXCPU */ #include /* curthread */ #include /* * Assert that a pointer can be loaded from memory atomically. * * This assertion enforces stronger alignment than necessary. For example, * on some architectures, atomicity for unaligned loads will depend on * whether or not the load spans multiple cache lines. */ #define ASSERT_ATOMIC_LOAD_PTR(var, msg) \ KASSERT(sizeof(var) == sizeof(void *) && \ ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0, msg) /* * Assert that a thread is in critical(9) section. */ #define CRITICAL_ASSERT(td) \ KASSERT((td)->td_critnest >= 1, ("Not in critical section")) /* * If we have already panic'd and this is the thread that called * panic(), then don't block on any mutexes but silently succeed. * Otherwise, the kernel will deadlock since the scheduler isn't * going to run the thread that holds any lock we need. 
*/ #define SCHEDULER_STOPPED_TD(td) ({ \ MPASS((td) == curthread); \ __predict_false((td)->td_stopsched); \ }) #define SCHEDULER_STOPPED() SCHEDULER_STOPPED_TD(curthread) extern int osreldate; extern const void *zero_region; /* address space maps to a zeroed page */ extern int unmapped_buf_allowed; #ifdef __LP64__ #define IOSIZE_MAX iosize_max() #define DEVFS_IOSIZE_MAX devfs_iosize_max() #else #define IOSIZE_MAX SSIZE_MAX #define DEVFS_IOSIZE_MAX SSIZE_MAX #endif /* * General function declarations. */ struct inpcb; struct lock_object; struct malloc_type; struct mtx; struct proc; struct socket; struct thread; struct tty; struct ucred; struct uio; struct _jmp_buf; struct trapframe; struct eventtimer; int setjmp(struct _jmp_buf *) __returns_twice; void longjmp(struct _jmp_buf *, int) __dead2; int dumpstatus(vm_offset_t addr, off_t count); int nullop(void); int eopnotsupp(void); int ureadc(int, struct uio *); void hashdestroy(void *, struct malloc_type *, u_long); void *hashinit(int count, struct malloc_type *type, u_long *hashmask); void *hashinit_flags(int count, struct malloc_type *type, u_long *hashmask, int flags); #define HASH_NOWAIT 0x00000001 #define HASH_WAITOK 0x00000002 void *phashinit(int count, struct malloc_type *type, u_long *nentries); void *phashinit_flags(int count, struct malloc_type *type, u_long *nentries, int flags); void g_waitidle(void); void cpu_flush_dcache(void *, size_t); void cpu_rootconf(void); void critical_enter_KBI(void); void critical_exit_KBI(void); void critical_exit_preempt(void); void init_param1(void); void init_param2(long physpages); void init_static_kenv(char *, size_t); void tablefull(const char *); /* * Allocate per-thread "current" state in the linuxkpi */ extern int (*lkpi_alloc_current)(struct thread *, int); int linux_alloc_current_noop(struct thread *, int); #if defined(KLD_MODULE) || defined(KTR_CRITICAL) || !defined(_KERNEL) || defined(GENOFFSET) #define critical_enter() critical_enter_KBI() #define critical_exit() 
critical_exit_KBI() #else static __inline void critical_enter(void) { struct thread_lite *td; td = (struct thread_lite *)curthread; td->td_critnest++; atomic_interrupt_fence(); } static __inline void critical_exit(void) { struct thread_lite *td; td = (struct thread_lite *)curthread; KASSERT(td->td_critnest != 0, ("critical_exit: td_critnest == 0")); atomic_interrupt_fence(); td->td_critnest--; atomic_interrupt_fence(); if (__predict_false(td->td_owepreempt)) critical_exit_preempt(); } #endif #ifdef EARLY_PRINTF typedef void early_putc_t(int ch); extern early_putc_t *early_putc; #endif int kvprintf(char const *, void (*)(int, void*), void *, int, __va_list) __printflike(1, 0); void log(int, const char *, ...) __printflike(2, 3); void log_console(struct uio *); void vlog(int, const char *, __va_list) __printflike(2, 0); int asprintf(char **ret, struct malloc_type *mtp, const char *format, ...) __printflike(3, 4); int printf(const char *, ...) __printflike(1, 2); int snprintf(char *, size_t, const char *, ...) __printflike(3, 4); int sprintf(char *buf, const char *, ...) __printflike(2, 3); int uprintf(const char *, ...) __printflike(1, 2); int vprintf(const char *, __va_list) __printflike(1, 0); int vasprintf(char **ret, struct malloc_type *mtp, const char *format, __va_list ap) __printflike(3, 0); int vsnprintf(char *, size_t, const char *, __va_list) __printflike(3, 0); int vsnrprintf(char *, size_t, int, const char *, __va_list) __printflike(4, 0); int vsprintf(char *buf, const char *, __va_list) __printflike(2, 0); int sscanf(const char *, char const * _Nonnull, ...) __scanflike(2, 3); int vsscanf(const char * _Nonnull, char const * _Nonnull, __va_list) __scanflike(2, 0); long strtol(const char *, char **, int); u_long strtoul(const char *, char **, int); quad_t strtoq(const char *, char **, int); u_quad_t strtouq(const char *, char **, int); void tprintf(struct proc *p, int pri, const char *, ...) 
__printflike(3, 4); void vtprintf(struct proc *, int, const char *, __va_list) __printflike(3, 0); void hexdump(const void *ptr, int length, const char *hdr, int flags); #define HD_COLUMN_MASK 0xff #define HD_DELIM_MASK 0xff00 #define HD_OMIT_COUNT (1 << 16) #define HD_OMIT_HEX (1 << 17) #define HD_OMIT_CHARS (1 << 18) #define ovbcopy(f, t, l) bcopy((f), (t), (l)) void bcopy(const void * _Nonnull from, void * _Nonnull to, size_t len); void bzero(void * _Nonnull buf, size_t len); void explicit_bzero(void * _Nonnull, size_t); int bcmp(const void *b1, const void *b2, size_t len); void *memset(void * _Nonnull buf, int c, size_t len); void *memcpy(void * _Nonnull to, const void * _Nonnull from, size_t len); void *memmove(void * _Nonnull dest, const void * _Nonnull src, size_t n); int memcmp(const void *b1, const void *b2, size_t len); -#ifdef KCSAN -void *kcsan_memset(void *, int, size_t); -void *kcsan_memcpy(void *, const void *, size_t); -void *kcsan_memmove(void *, const void *, size_t); -int kcsan_memcmp(const void *, const void *, size_t); -#define bcopy(from, to, len) kcsan_memmove((to), (from), (len)) -#define bzero(buf, len) kcsan_memset((buf), 0, (len)) -#define bcmp(b1, b2, len) kcsan_memcmp((b1), (b2), (len)) -#define memset(buf, c, len) kcsan_memset((buf), (c), (len)) -#define memcpy(to, from, len) kcsan_memcpy((to), (from), (len)) -#define memmove(dest, src, n) kcsan_memmove((dest), (src), (n)) -#define memcmp(b1, b2, len) kcsan_memcmp((b1), (b2), (len)) +#if defined(KASAN) +#define SAN_PREFIX kasan_ +#elif defined(KCSAN) +#define SAN_PREFIX kcsan_ +#endif + +#ifdef SAN_PREFIX +#define SAN_INTERCEPTOR(func) __CONCAT(SAN_PREFIX, func) + +void *SAN_INTERCEPTOR(memset)(void *, int, size_t); +void *SAN_INTERCEPTOR(memcpy)(void *, const void *, size_t); +void *SAN_INTERCEPTOR(memmove)(void *, const void *, size_t); +int SAN_INTERCEPTOR(memcmp)(const void *, const void *, size_t); +#ifndef SAN_RUNTIME +#define bcopy(from, to, len) SAN_INTERCEPTOR(memmove)((to), 
(from), (len)) +#define bzero(buf, len) SAN_INTERCEPTOR(memset)((buf), 0, (len)) +#define bcmp(b1, b2, len) SAN_INTERCEPTOR(memcmp)((b1), (b2), (len)) +#define memset(buf, c, len) SAN_INTERCEPTOR(memset)((buf), (c), (len)) +#define memcpy(to, from, len) SAN_INTERCEPTOR(memcpy)((to), (from), (len)) +#define memmove(dest, src, n) SAN_INTERCEPTOR(memmove)((dest), (src), (n)) +#define memcmp(b1, b2, len) SAN_INTERCEPTOR(memcmp)((b1), (b2), (len)) +#endif /* !SAN_RUNTIME */ #else #define bcopy(from, to, len) __builtin_memmove((to), (from), (len)) #define bzero(buf, len) __builtin_memset((buf), 0, (len)) #define bcmp(b1, b2, len) __builtin_memcmp((b1), (b2), (len)) #define memset(buf, c, len) __builtin_memset((buf), (c), (len)) #define memcpy(to, from, len) __builtin_memcpy((to), (from), (len)) #define memmove(dest, src, n) __builtin_memmove((dest), (src), (n)) #define memcmp(b1, b2, len) __builtin_memcmp((b1), (b2), (len)) -#endif +#endif /* SAN_PREFIX */ void *memset_early(void * _Nonnull buf, int c, size_t len); #define bzero_early(buf, len) memset_early((buf), 0, (len)) void *memcpy_early(void * _Nonnull to, const void * _Nonnull from, size_t len); void *memmove_early(void * _Nonnull dest, const void * _Nonnull src, size_t n); #define bcopy_early(from, to, len) memmove_early((to), (from), (len)) #define copystr(src, dst, len, outlen) ({ \ size_t __r, __len, *__outlen; \ \ __len = (len); \ __outlen = (outlen); \ __r = strlcpy((dst), (src), __len); \ if (__outlen != NULL) \ *__outlen = ((__r >= __len) ? __len : __r + 1); \ ((__r >= __len) ?
ENAMETOOLONG : 0); \ }) int copyinstr(const void * __restrict udaddr, void * _Nonnull __restrict kaddr, size_t len, size_t * __restrict lencopied); int copyin(const void * __restrict udaddr, void * _Nonnull __restrict kaddr, size_t len); int copyin_nofault(const void * __restrict udaddr, void * _Nonnull __restrict kaddr, size_t len); int copyout(const void * _Nonnull __restrict kaddr, void * __restrict udaddr, size_t len); int copyout_nofault(const void * _Nonnull __restrict kaddr, void * __restrict udaddr, size_t len); -#ifdef KCSAN -int kcsan_copyin(const void *, void *, size_t); -int kcsan_copyinstr(const void *, void *, size_t, size_t *); -int kcsan_copyout(const void *, void *, size_t); -#define copyin(u, k, l) kcsan_copyin((u), (k), (l)) -#define copyinstr(u, k, l, lc) kcsan_copyinstr((u), (k), (l), (lc)) -#define copyout(k, u, l) kcsan_copyout((k), (u), (l)) -#endif +#ifdef SAN_PREFIX +int SAN_INTERCEPTOR(copyin)(const void *, void *, size_t); +int SAN_INTERCEPTOR(copyinstr)(const void *, void *, size_t, size_t *); +int SAN_INTERCEPTOR(copyout)(const void *, void *, size_t); +#ifndef SAN_RUNTIME +#define copyin(u, k, l) SAN_INTERCEPTOR(copyin)((u), (k), (l)) +#define copyinstr(u, k, l, lc) SAN_INTERCEPTOR(copyinstr)((u), (k), (l), (lc)) +#define copyout(k, u, l) SAN_INTERCEPTOR(copyout)((k), (u), (l)) +#endif /* !SAN_RUNTIME */ +#endif /* SAN_PREFIX */ int fubyte(volatile const void *base); long fuword(volatile const void *base); int fuword16(volatile const void *base); int32_t fuword32(volatile const void *base); int64_t fuword64(volatile const void *base); int fueword(volatile const void *base, long *val); int fueword32(volatile const void *base, int32_t *val); int fueword64(volatile const void *base, int64_t *val); int subyte(volatile void *base, int byte); int suword(volatile void *base, long word); int suword16(volatile void *base, int word); int suword32(volatile void *base, int32_t word); int suword64(volatile void *base, int64_t word); uint32_t 
casuword32(volatile uint32_t *base, uint32_t oldval, uint32_t newval); u_long casuword(volatile u_long *p, u_long oldval, u_long newval); int casueword32(volatile uint32_t *base, uint32_t oldval, uint32_t *oldvalp, uint32_t newval); int casueword(volatile u_long *p, u_long oldval, u_long *oldvalp, u_long newval); void realitexpire(void *); int sysbeep(int hertz, int period); void hardclock(int cnt, int usermode); void hardclock_sync(int cpu); void softclock(void *); void statclock(int cnt, int usermode); void profclock(int cnt, int usermode, uintfptr_t pc); int hardclockintr(void); void startprofclock(struct proc *); void stopprofclock(struct proc *); void cpu_startprofclock(void); void cpu_stopprofclock(void); void suspendclock(void); void resumeclock(void); sbintime_t cpu_idleclock(void); void cpu_activeclock(void); void cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt); void cpu_et_frequency(struct eventtimer *et, uint64_t newfreq); extern int cpu_disable_c2_sleep; extern int cpu_disable_c3_sleep; char *kern_getenv(const char *name); void freeenv(char *env); int getenv_int(const char *name, int *data); int getenv_uint(const char *name, unsigned int *data); int getenv_long(const char *name, long *data); int getenv_ulong(const char *name, unsigned long *data); int getenv_string(const char *name, char *data, int size); int getenv_int64(const char *name, int64_t *data); int getenv_uint64(const char *name, uint64_t *data); int getenv_quad(const char *name, quad_t *data); int getenv_bool(const char *name, bool *data); bool getenv_is_true(const char *name); bool getenv_is_false(const char *name); int kern_setenv(const char *name, const char *value); int kern_unsetenv(const char *name); int testenv(const char *name); int getenv_array(const char *name, void *data, int size, int *psize, int type_size, bool allow_signed); #define GETENV_UNSIGNED false /* negative numbers not allowed */ #define GETENV_SIGNED true /* negative numbers allowed */ typedef uint64_t 
(cpu_tick_f)(void); void set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var); extern cpu_tick_f *cpu_ticks; uint64_t cpu_tickrate(void); uint64_t cputick2usec(uint64_t tick); #include /* Initialize the world */ void consinit(void); void cpu_initclocks(void); void cpu_initclocks_bsp(void); void cpu_initclocks_ap(void); void usrinfoinit(void); /* Finalize the world */ void kern_reboot(int) __dead2; void shutdown_nice(int); /* Stubs for obsolete functions that used to be for interrupt management */ static __inline intrmask_t splhigh(void) { return 0; } static __inline intrmask_t splimp(void) { return 0; } static __inline intrmask_t splnet(void) { return 0; } static __inline intrmask_t spltty(void) { return 0; } static __inline void splx(intrmask_t ipl __unused) { return; } /* * Common `proc' functions are declared here so that proc.h can be included * less often. */ int _sleep(const void * _Nonnull chan, struct lock_object *lock, int pri, const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags); #define msleep(chan, mtx, pri, wmesg, timo) \ _sleep((chan), &(mtx)->lock_object, (pri), (wmesg), \ tick_sbt * (timo), 0, C_HARDCLOCK) #define msleep_sbt(chan, mtx, pri, wmesg, bt, pr, flags) \ _sleep((chan), &(mtx)->lock_object, (pri), (wmesg), (bt), (pr), \ (flags)) int msleep_spin_sbt(const void * _Nonnull chan, struct mtx *mtx, const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags); #define msleep_spin(chan, mtx, wmesg, timo) \ msleep_spin_sbt((chan), (mtx), (wmesg), tick_sbt * (timo), \ 0, C_HARDCLOCK) int pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags); #define pause(wmesg, timo) \ pause_sbt((wmesg), tick_sbt * (timo), 0, C_HARDCLOCK) #define pause_sig(wmesg, timo) \ pause_sbt((wmesg), tick_sbt * (timo), 0, C_HARDCLOCK | C_CATCH) #define tsleep(chan, pri, wmesg, timo) \ _sleep((chan), NULL, (pri), (wmesg), tick_sbt * (timo), \ 0, C_HARDCLOCK) #define tsleep_sbt(chan, pri, wmesg, bt, pr, flags) \ _sleep((chan), NULL, (pri), 
(wmesg), (bt), (pr), (flags)) void wakeup(const void *chan); void wakeup_one(const void *chan); void wakeup_any(const void *chan); /* * Common `struct cdev *' stuff are declared here to avoid #include poisoning */ struct cdev; dev_t dev2udev(struct cdev *x); const char *devtoname(struct cdev *cdev); #ifdef __LP64__ size_t devfs_iosize_max(void); size_t iosize_max(void); #endif int poll_no_poll(int events); /* XXX: Should be void nanodelay(u_int nsec); */ void DELAY(int usec); /* Root mount holdback API */ struct root_hold_token { int flags; const char *who; TAILQ_ENTRY(root_hold_token) list; }; struct root_hold_token *root_mount_hold(const char *identifier); void root_mount_hold_token(const char *identifier, struct root_hold_token *h); void root_mount_rel(struct root_hold_token *h); int root_mounted(void); /* * Unit number allocation API. (kern/subr_unit.c) */ struct unrhdr; struct unrhdr *new_unrhdr(int low, int high, struct mtx *mutex); void init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex); void delete_unrhdr(struct unrhdr *uh); void clear_unrhdr(struct unrhdr *uh); void clean_unrhdr(struct unrhdr *uh); void clean_unrhdrl(struct unrhdr *uh); int alloc_unr(struct unrhdr *uh); int alloc_unr_specific(struct unrhdr *uh, u_int item); int alloc_unrl(struct unrhdr *uh); void free_unr(struct unrhdr *uh, u_int item); #ifndef __LP64__ #define UNR64_LOCKED #endif struct unrhdr64 { uint64_t counter; }; static __inline void new_unrhdr64(struct unrhdr64 *unr64, uint64_t low) { unr64->counter = low; } #ifdef UNR64_LOCKED uint64_t alloc_unr64(struct unrhdr64 *); #else static __inline uint64_t alloc_unr64(struct unrhdr64 *unr64) { return (atomic_fetchadd_64(&unr64->counter, 1)); } #endif void intr_prof_stack_use(struct thread *td, struct trapframe *frame); void counted_warning(unsigned *counter, const char *msg); /* * APIs to manage deprecation and obsolescence. 
*/ void _gone_in(int major, const char *msg); void _gone_in_dev(device_t dev, int major, const char *msg); #ifdef NO_OBSOLETE_CODE #define __gone_ok(m, msg) \ _Static_assert(m < P_OSREL_MAJOR(__FreeBSD_version), \ "Obsolete code: " msg); #else #define __gone_ok(m, msg) #endif #define gone_in(major, msg) __gone_ok(major, msg) _gone_in(major, msg) #define gone_in_dev(dev, major, msg) __gone_ok(major, msg) _gone_in_dev(dev, major, msg) #if defined(INVARIANTS) || defined(WITNESS) #define __diagused #else #define __diagused __unused #endif #endif /* _KERNEL */ __NULLABILITY_PRAGMA_POP #endif /* !_SYS_SYSTM_H_ */